blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6abe0bee3a06fab69de5f3271544996e2c44bdba | 1c1f8e6e66dad142d35c88710d52b25d6e0640c6 | /tests/inventory/test_macos_apps_views.py | 765458fba224a26a3e12594b44e0465091fe4957 | [
"Apache-2.0"
] | permissive | ChefAustin/zentral | 4a5190434f9010f71385bff4c2b6f02120b651ed | 1749eeb2a0c727d3bff7a3b893158c6fe36e9d9c | refs/heads/main | 2023-01-23T00:52:12.417530 | 2020-12-03T07:41:36 | 2020-12-03T07:41:36 | 318,111,990 | 1 | 0 | Apache-2.0 | 2020-12-03T07:33:27 | 2020-12-03T07:33:27 | null | UTF-8 | Python | false | false | 6,759 | py | from datetime import datetime
from django.urls import reverse
from django.utils.http import urlencode
from django.test import TestCase, override_settings
from zentral.contrib.inventory.models import MachineSnapshotCommit
from accounts.models import User
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class MacOSAppsViewsTestCase(TestCase):
    """View tests for the inventory macOS application list and detail pages."""

    @classmethod
    def setUpTestData(cls):
        # user
        cls.pwd = "godzillapwd"
        cls.user = User.objects.create_user("godzilla", "godzilla@zentral.io", cls.pwd)
        # machine snapshot
        cls.computer_name = "yolozulu"
        source = {"module": "tests.zentral.io", "name": "Zentral Tests"}
        # One machine snapshot with a single macOS app instance signed by a
        # three-level certificate chain (leaf -> intermediate -> Apple Root CA).
        tree = {
            "source": source,
            "business_unit": {"name": "yo bu",
                              "reference": "bu1",
                              "source": source,
                              "links": [{"anchor_text": "bu link",
                                         "url": "http://bu-link.de"}]},
            "groups": [{"name": "yo grp",
                        "reference": "grp1",
                        "source": source,
                        "links": [{"anchor_text": "group link",
                                   "url": "http://group-link.de"}]}],
            "serial_number": "0123456789",
            "system_info": {"computer_name": cls.computer_name},
            "os_version": {'name': 'OS X', 'major': 10, 'minor': 11, 'patch': 1},
            "osx_app_instances": [
                {'app': {'bundle_id': 'io.zentral.baller',
                         'bundle_name': 'Baller.app',
                         'bundle_version': '123',
                         'bundle_version_str': '1.2.3'},
                 'bundle_path': "/Applications/Baller.app",
                 'signed_by': {
                     "common_name": "Developer ID Application: GODZILLA",
                     "organization": "GOZILLA INC",
                     "organizational_unit": "ATOM",
                     "sha_1": 40 * "a",
                     "sha_256": 64 * "a",
                     "valid_from": datetime(2015, 1, 1),
                     "valid_until": datetime(2026, 1, 1),
                     "signed_by": {
                         "common_name": "Developer ID Certification Authority",
                         "organization": "Apple Inc.",
                         "organizational_unit": "Apple Certification Authority",
                         "sha_1": "3b166c3b7dc4b751c9fe2afab9135641e388e186",
                         "sha_256": "7afc9d01a62f03a2de9637936d4afe68090d2de18d03f29c88cfb0b1ba63587f",
                         "valid_from": datetime(2012, 12, 1),
                         "valid_until": datetime(2027, 12, 1),
                         "signed_by": {
                             "common_name": "Apple Root CA",
                             "organization": "Apple Inc.",
                             "organizational_unit": "Apple Certification Authority",
                             "sha_1": "611e5b662c593a08ff58d14ae22452d198df6c60",
                             "sha_256": "b0b1730ecbc7ff4505142c49f1295e6eda6bcaed7e2c68c5be91b5a11001f024",
                             "valid_from": datetime(2006, 4, 25),
                             "valid_until": datetime(2035, 2, 9)
                         }
                     }
                 }}
            ]
        }
        _, cls.ms = MachineSnapshotCommit.objects.commit_machine_snapshot_tree(tree)
        cls.osx_app_instance = cls.ms.osx_app_instances.all()[0]
        cls.osx_app = cls.osx_app_instance.app

    def log_user_in(self):
        # Authenticate the test client as the user created in setUpTestData.
        self.client.post(reverse('login'), {'username': self.user.username, 'password': self.pwd})

    def test_macos_apps(self):
        self.log_user_in()
        response = self.client.get(reverse("inventory:macos_apps"))
        self.assertContains(response, "1 macOS application")

    def test_macos_apps_sha_256_search(self):
        self.log_user_in()
        # cert signature
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"sha_256": "7afc9d01a62f03a2de9637936d4afe68090d2de18d03f29c88cfb0b1ba63587f"})
        ))
        self.assertContains(response, "1 macOS application")
        # binary signature
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"sha_256": 64 * "a"})
        ))
        self.assertContains(response, "1 macOS application")
        # bad sha 256
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"sha_256": 64 * "z"})
        ))
        self.assertFormError(response, "search_form", "sha_256", "Enter a valid sha256.")
        # another sha 256
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"sha_256": 64 * "f"})
        ))
        self.assertContains(response, "0 macOS applications")

    def test_macos_apps_source_search(self):
        self.log_user_in()
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"source": self.ms.source.id})
        ))
        self.assertContains(response, "1 macOS application")

    def test_macos_apps_bundle_name(self):
        self.log_user_in()
        # Bundle name search is case-insensitive ("baller" matches "Baller.app").
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"bundle_name": "baller"})
        ))
        self.assertContains(response, "1 macOS application")
        response = self.client.get("{}?{}".format(
            reverse("inventory:macos_apps"),
            urlencode({"bundle_name": "yolo"})
        ))
        self.assertContains(response, "0 macOS applications")

    def test_macos_app(self):
        self.log_user_in()
        response = self.client.get(reverse("inventory:macos_app", args=(self.osx_app.id,)))
        self.assertContains(response, "Baller.app 1.2.3")
        self.assertContains(response, "1 application instance")
        self.assertContains(response, self.osx_app_instance.signed_by.sha_256)

    def test_macos_app_instance_machines(self):
        self.log_user_in()
        response = self.client.get(reverse("inventory:macos_app_instance_machines",
                                           args=(self.osx_app.id, self.osx_app_instance.id)),
                                   follow=True)
        self.assertContains(response, "Baller.app 1.2.3")
        self.assertContains(response, "1 Machine")
        self.assertContains(response, self.osx_app_instance.signed_by.sha_256)
        self.assertContains(response, self.computer_name)
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
a3c79f13e09b4df0d392ba14bcce693c8a8e0004 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/scaleform/daapi/view/lobby/header/BattleTypeSelectPopover.py | 17cc2c813eae3014346e7d39c69d740d4c574aec | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 4,906 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/header/BattleTypeSelectPopover.py
from adisp import process
from frameworks.wulf import WindowLayer
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.header import battle_selector_items
from gui.Scaleform.daapi.view.meta.BattleTypeSelectPopoverMeta import BattleTypeSelectPopoverMeta
from gui.Scaleform.framework.managers.containers import POP_UP_CRITERIA
from gui.Scaleform.framework.managers.loaders import SFViewLoadParams
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.prb_control.settings import PREBATTLE_ACTION_NAME
from gui.shared import EVENT_BUS_SCOPE
from gui.shared.events import LoadViewEvent
from helpers import dependency
from skeletons.gui.game_control import IRankedBattlesController
from skeletons.gui.lobby_context import ILobbyContext
class BattleTypeSelectPopover(BattleTypeSelectPopoverMeta):
    """Popover in the lobby header listing the selectable battle types."""

    __rankedController = dependency.descriptor(IRankedBattlesController)
    __lobbyContext = dependency.descriptor(ILobbyContext)

    def __init__(self, _=None):
        super(BattleTypeSelectPopover, self).__init__()
        self._tooltip = None
        return

    def selectFight(self, actionName):
        # Flash-facing entry point; defers to the coroutine implementation.
        self.__selectFight(actionName)

    def getTooltipData(self, itemData, itemIsDisabled):
        # Map a PREBATTLE_ACTION_NAME value to a tooltip id plus a flag
        # selecting the "special" (rich) tooltip layout. Returns None when
        # there is no item under the cursor.
        if itemData is None:
            return
        else:
            tooltip = ''
            isSpecial = False
            if itemData == PREBATTLE_ACTION_NAME.RANDOM:
                tooltip = TOOLTIPS.BATTLETYPES_STANDART
            elif itemData == PREBATTLE_ACTION_NAME.EPIC:
                tooltip, isSpecial = self.__getEpicAvailabilityData()
            elif itemData == PREBATTLE_ACTION_NAME.RANKED:
                tooltip, isSpecial = self.__getRankedAvailabilityData()
            elif itemData == PREBATTLE_ACTION_NAME.E_SPORT:
                tooltip = TOOLTIPS.BATTLETYPES_UNIT
            elif itemData == PREBATTLE_ACTION_NAME.STRONGHOLDS_BATTLES_LIST:
                # Strongholds is the only entry whose tooltip depends on the
                # item's disabled state.
                if not itemIsDisabled:
                    tooltip = TOOLTIPS.BATTLETYPES_STRONGHOLDS
                else:
                    tooltip = TOOLTIPS.BATTLETYPES_STRONGHOLDS_DISABLED
            elif itemData == PREBATTLE_ACTION_NAME.TRAININGS_LIST:
                tooltip = TOOLTIPS.BATTLETYPES_TRAINING
            elif itemData == PREBATTLE_ACTION_NAME.EPIC_TRAINING_LIST:
                tooltip = TOOLTIPS.BATTLETYPES_EPIC_TRAINING
            elif itemData == PREBATTLE_ACTION_NAME.SPEC_BATTLES_LIST:
                tooltip = TOOLTIPS.BATTLETYPES_SPEC
            elif itemData == PREBATTLE_ACTION_NAME.BATTLE_TUTORIAL:
                tooltip = TOOLTIPS.BATTLETYPES_BATTLETUTORIAL
            elif itemData == PREBATTLE_ACTION_NAME.SANDBOX:
                isSpecial = True
                tooltip = TOOLTIPS_CONSTANTS.BATTLE_TRAINING
            elif itemData == PREBATTLE_ACTION_NAME.BATTLE_ROYALE:
                tooltip = TOOLTIPS_CONSTANTS.BATTLE_ROYALE_SELECTOR_INFO
                isSpecial = True
            elif itemData == PREBATTLE_ACTION_NAME.MAPBOX:
                tooltip = TOOLTIPS_CONSTANTS.MAPBOX_SELECTOR_INFO
                isSpecial = True
            elif itemData == PREBATTLE_ACTION_NAME.EVENT_BATTLE or itemData == PREBATTLE_ACTION_NAME.EVENT_SQUAD:
                isSpecial = True
                tooltip = TOOLTIPS_CONSTANTS.EVENT_BATTLES_SELECTOR_INFO
            result = {'isSpecial': isSpecial,
                      'tooltip': tooltip}
            self._tooltip = tooltip
            return result

    def demoClick(self):
        # Toggle the demonstrator window: close it when already open,
        # otherwise fire the event that loads it into the lobby.
        demonstratorWindow = self.app.containerManager.getView(WindowLayer.WINDOW, criteria={POP_UP_CRITERIA.VIEW_ALIAS: VIEW_ALIAS.DEMONSTRATOR_WINDOW})
        if demonstratorWindow is not None:
            demonstratorWindow.onWindowClose()
        else:
            self.fireEvent(LoadViewEvent(SFViewLoadParams(VIEW_ALIAS.DEMONSTRATOR_WINDOW)), EVENT_BUS_SCOPE.LOBBY)
        return

    def update(self):
        # Push the current battle-type item VOs to the Flash view, unless
        # the view has already been disposed.
        if not self.isDisposed():
            self.as_updateS(*battle_selector_items.getItems().getVOs())

    def _populate(self):
        super(BattleTypeSelectPopover, self)._populate()
        self.update()

    def __getRankedAvailabilityData(self):
        # Tooltip id depends on ranked season availability; always "special".
        return (TOOLTIPS_CONSTANTS.RANKED_SELECTOR_INFO, True) if self.__rankedController.isAvailable() else (TOOLTIPS_CONSTANTS.RANKED_UNAVAILABLE_INFO, True)

    def __getEpicAvailabilityData(self):
        return (TOOLTIPS_CONSTANTS.EPIC_BATTLE_SELECTOR_INFO, True)

    @process
    def __selectFight(self, actionName):
        # adisp coroutine: confirm header navigation is currently allowed
        # before actually switching the selected battle type.
        navigationPossible = yield self.__lobbyContext.isHeaderNavigationPossible()
        if not navigationPossible:
            return
        battle_selector_items.getItems().select(actionName)
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
8924fe0c6f40a20395b0789d49f31baaa30cc805 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /5S97Me79PDAefLEXv_17.py | c79aeef96fef03d2d9fb182f6d28a6120e6e6907 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py |
def lambda_to_def(code):
    """Rewrite a one-line ``name = lambda args: expr`` string as a def statement.

    Returns ``"def name(args):\\n\\treturn expr"`` built from the pieces of the
    assignment found in *code*.
    """
    assign_pos = code.index("=")
    colon_pos = code.index(":")
    lambda_pos = code.index("lambda")
    # When a second colon exists (e.g. inside the lambda body), prefer it as
    # the header/body separator unless it starts a string literal.
    try:
        second_colon = code.index(":", colon_pos + 1)
    except ValueError:
        second_colon = None
    if second_colon is not None and code[second_colon + 1] != "'":
        colon_pos = second_colon
    name = code[:assign_pos - 1]
    params = code[lambda_pos + 7:colon_pos]
    body = code[colon_pos + 1:]
    return "def " + name + "(" + params + "):\n\treturn" + body
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5f49a46cba72f2d75b3f06ab89300e953baf3963 | b75a259624cd91c75d584b9f9548a2b7e179a81f | /models/LSTM_Classifier.py | 83a458cfc4d9ca4942586f6f0e1d74485a9dbd0a | [] | no_license | lucaskingjade/Classification | 911f73b8ec037d9f560065aa71116650c128d721 | 506c036c141df5eee69a5f84672f9635e0ad242b | refs/heads/master | 2021-01-20T06:43:26.815927 | 2017-06-02T07:58:54 | 2017-06-02T07:58:54 | 89,916,858 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | #This is a simple implementation of LSTM
from keras.layers import Input,Embedding,RepeatVector,Reshape,LSTM,merge,Dense
from keras.models import Model
from keras.optimizers import SGD,RMSprop
def lstm_model(max_len=200, dof=70, embd_dim=2,
               hidden_dim_list=None, activation_list=None,
               optimizer='sgd', lr=0.01, momentum=0.0):
    """Build and compile a stacked-LSTM classifier conditioned on a label embedding.

    The class label (an integer in [0, 8)) is embedded, repeated along the time
    axis and concatenated to the motion input before the LSTM stack.

    Args:
        max_len: sequence length of the input.
        dof: number of features (degrees of freedom) per time step.
        embd_dim: size of the label embedding.
        hidden_dim_list: output sizes of the stacked LSTM layers (default [100, 20]).
        activation_list: activation per LSTM layer (default ['tanh', 'tanh']).
        optimizer: 'sgd' or 'rmsprop'.
        lr: learning rate (float; the old default was the *string* '0.01',
            which SGD/RMSprop cannot consume).
        momentum: momentum for SGD only.

    Returns:
        A compiled Keras ``Model`` with sigmoid outputs over 8 classes.

    Raises:
        ValueError: if *optimizer* is neither 'sgd' nor 'rmsprop'.
    """
    # Avoid mutable default arguments shared across calls.
    if hidden_dim_list is None:
        hidden_dim_list = [100, 20]
    if activation_list is None:
        activation_list = ['tanh', 'tanh']
    inp = Input(shape=(max_len, dof), name='input')  # renamed: don't shadow builtin input
    label_input = Input(shape=(1,), name='label_input')
    embd_label = Embedding(input_dim=8, output_dim=embd_dim)(label_input)
    embd_label = Reshape(target_shape=(embd_dim,))(embd_label)
    embd_label = RepeatVector(max_len)(embd_label)
    encoded = merge([inp, embd_label], mode='concat', concat_axis=2)
    for i, (dim, activation) in enumerate(zip(hidden_dim_list, activation_list)):
        # Only the last LSTM collapses the sequence into a single vector.
        is_last = i == len(hidden_dim_list) - 1
        encoded = LSTM(output_dim=dim, activation=activation,
                       return_sequences=not is_last)(encoded)
    encoded = Dense(output_dim=8, activation='sigmoid')(encoded)
    model = Model(input=[inp, label_input], output=encoded, name='Encoder')
    if optimizer == 'sgd':
        optimizer_model = SGD(lr=lr, momentum=momentum)
    elif optimizer == 'rmsprop':
        optimizer_model = RMSprop(lr=lr)
    else:
        raise ValueError('No such kind optimizer')
    # compile model -- Keras expects `metrics` as a list, not a bare string
    model.compile(optimizer=optimizer_model, loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
"wangqi531@hotmail.com"
] | wangqi531@hotmail.com |
eaf7a34c14cd4e1b28388a48ae0963692d9f010e | afebbb07b2b4eada17a5853c1ce63b4075d280df | /marketsim/gen/_out/orderbook/_VolumeLevels.py | b906b3b76b1fa1729242c7a4bbcd934491c3730f | [] | no_license | peter1000/marketsimulator | 8c0a55fc6408b880311d3ad49defc55e9af57824 | 1b677200a9d5323f2970c83f076c2b83d39d4fe6 | refs/heads/master | 2021-01-18T01:39:04.869755 | 2015-03-29T17:47:24 | 2015-03-29T17:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,579 | py | # generated with class generator.python.intrinsic_observable$Import
from marketsim import registry
from marketsim.gen._out._observable._observableivolumelevels import ObservableIVolumeLevels
from marketsim.gen._out._ivolumelevels import IVolumeLevels
from marketsim.gen._intrinsic.orderbook.volume_levels import VolumeLevels_Impl
from marketsim.gen._out._iorderqueue import IOrderQueue
@registry.expose(["Asset", "VolumeLevels"])
class VolumeLevels_IOrderQueueFloatInt(ObservableIVolumeLevels,VolumeLevels_Impl):
    """ **Returns arrays of levels for given volumes [i*volumeDelta for i in range(0, volumeCount)]**

    Level of volume V is a price at which cumulative volume of better orders is V

    Parameters are:

    **queue**

    **volumeDelta**
        distance between two volumes

    **volumeCount**
        number of volume levels to track
    """
    # NOTE: code generated by the marketsim class generator (see header
    # comment) and written for Python 2 (uses dict.iterkeys) -- regenerate
    # rather than hand-edit.

    def __init__(self, queue = None, volumeDelta = None, volumeCount = None):
        from marketsim.gen._out._ivolumelevels import IVolumeLevels
        from marketsim.gen._out._observable._observableivolumelevels import ObservableIVolumeLevels
        from marketsim.gen._out.orderbook._asks import Asks_IOrderBook as _orderbook_Asks_IOrderBook
        from marketsim import deref_opt
        ObservableIVolumeLevels.__init__(self)
        # Defaults: the asks queue, volume step 30.0, 10 tracked levels.
        self.queue = queue if queue is not None else deref_opt(_orderbook_Asks_IOrderBook())
        self.volumeDelta = volumeDelta if volumeDelta is not None else 30.0
        self.volumeCount = volumeCount if volumeCount is not None else 10
        VolumeLevels_Impl.__init__(self)

    @property
    def label(self):
        return repr(self)

    # Property name -> expected type, used by __repr__ and typecheck().
    _properties = {
        'queue' : IOrderQueue,
        'volumeDelta' : float,
        'volumeCount' : int
    }

    def __repr__(self):
        return "VolumeLevels(%(queue)s)" % dict([ (name, getattr(self, name)) for name in self._properties.iterkeys() ])

    def bind_ex(self, ctx):
        # Bind this node and its children into the simulation context.
        # _bound_ex guards against double binding; _processing_ex detects
        # cycles in the object graph.
        if self.__dict__.get('_bound_ex', False): return
        self.__dict__['_bound_ex'] = True
        if self.__dict__.get('_processing_ex', False):
            raise Exception('cycle detected')
        self.__dict__['_processing_ex'] = True
        self.__dict__['_ctx_ex'] = ctx.updatedFrom(self)
        if hasattr(self, '_internals'):
            for t in self._internals:
                v = getattr(self, t)
                if type(v) in [list, set]:
                    for w in v: w.bind_ex(self.__dict__['_ctx_ex'])
                else:
                    v.bind_ex(self.__dict__['_ctx_ex'])
        self.queue.bind_ex(self._ctx_ex)
        self.bind_impl(self.__dict__['_ctx_ex'])
        if hasattr(self, '_subscriptions'):
            for s in self._subscriptions: s.bind_ex(self.__dict__['_ctx_ex'])
        self.__dict__['_processing_ex'] = False

    def reset_ex(self, generation):
        # Generation counter prevents resetting the same node twice per pass.
        if self.__dict__.get('_reset_generation_ex', -1) == generation: return
        self.__dict__['_reset_generation_ex'] = generation
        if self.__dict__.get('_processing_ex', False):
            raise Exception('cycle detected')
        self.__dict__['_processing_ex'] = True
        if hasattr(self, '_internals'):
            for t in self._internals:
                v = getattr(self, t)
                if type(v) in [list, set]:
                    for w in v: w.reset_ex(generation)
                else:
                    v.reset_ex(generation)
        self.queue.reset_ex(generation)
        self.reset()
        if hasattr(self, '_subscriptions'):
            for s in self._subscriptions: s.reset_ex(generation)
        self.__dict__['_processing_ex'] = False

    def typecheck(self):
        from marketsim import rtti
        from marketsim.gen._out._iorderqueue import IOrderQueue
        rtti.typecheck(IOrderQueue, self.queue)
        rtti.typecheck(float, self.volumeDelta)
        rtti.typecheck(int, self.volumeCount)

    def registerIn(self, registry):
        # Register self and all reachable children exactly once.
        if self.__dict__.get('_id', False): return
        self.__dict__['_id'] = True
        if self.__dict__.get('_processing_ex', False):
            raise Exception('cycle detected')
        self.__dict__['_processing_ex'] = True
        registry.insert(self)
        self.queue.registerIn(registry)
        if hasattr(self, '_subscriptions'):
            for s in self._subscriptions: s.registerIn(registry)
        if hasattr(self, '_internals'):
            for t in self._internals:
                v = getattr(self, t)
                if type(v) in [list, set]:
                    for w in v: w.registerIn(registry)
                else:
                    v.registerIn(registry)
        self.__dict__['_processing_ex'] = False

    def bind_impl(self, ctx):
        VolumeLevels_Impl.bind_impl(self, ctx)

    def reset(self):
        VolumeLevels_Impl.reset(self)
def VolumeLevels(queue = None,volumeDelta = None,volumeCount = None):
    # Generated overload dispatcher: validates argument types via rtti
    # before delegating to the single concrete implementation class.
    from marketsim.gen._out._iorderqueue import IOrderQueue
    from marketsim import rtti
    if queue is None or rtti.can_be_casted(queue, IOrderQueue):
        if volumeDelta is None or rtti.can_be_casted(volumeDelta, float):
            if volumeCount is None or rtti.can_be_casted(volumeCount, int):
                return VolumeLevels_IOrderQueueFloatInt(queue,volumeDelta,volumeCount)
    raise Exception('Cannot find suitable overload for VolumeLevels('+str(queue) +':'+ str(type(queue))+','+str(volumeDelta) +':'+ str(type(volumeDelta))+','+str(volumeCount) +':'+ str(type(volumeCount))+')')
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
3e99befc5438312e713a1e78d14e1bc4f6d79697 | f9c98f9c127fa1cd9fba17abe17199fb5440b36b | /md_rahaman/python/full_stack_django/dojo_secret/apps/dojo_secret_app/migrations/0001_initial.py | 8a6981c5b5c67aeb2eb76efcddae0dfeca2b27b1 | [] | no_license | RibRibble/python_april_2017 | 162e543f97afc77d44fcc858106e4730d3f7f760 | 3cc4240d371a8bad8da2ea085e3675272cca2de3 | refs/heads/master | 2021-01-19T01:12:34.667828 | 2017-04-27T22:11:53 | 2017-04-27T22:11:53 | 87,233,010 | 1 | 0 | null | 2017-04-04T20:41:44 | 2017-04-04T20:41:44 | null | UTF-8 | Python | false | false | 1,758 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-20 21:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.10, see header): creates the
    # Secret and User tables, then wires Secret.likes (M2M) and Secret.user
    # (FK) after both models exist. Do not hand-edit applied migrations.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Secret',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('secret', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.AddField(
            model_name='secret',
            name='likes',
            field=models.ManyToManyField(related_name='secrets_liked', to='dojo_secret_app.User'),
        ),
        migrations.AddField(
            model_name='secret',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secrets', to='dojo_secret_app.User'),
        ),
    ]
| [
"soikatesc@gmail.com"
] | soikatesc@gmail.com |
b6b21288b2195932f2a24c5efb924383b55353da | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /2iQhC3t4SDZ6LGMWw_20.py | 495d30cd14904c8d1eb8876a74a2a751502893ec | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | """
Given a list of 2D points `[x, y]`, create a function that returns `True` if
those points can be on the _bounds_ of a rectangle, `False` otherwise.

### Examples
on_rectangle_bounds([[0, 1], [1, 0], [1, 1], [0, 0]]) ➞ True
on_rectangle_bounds([[0, 1], [1, 0], [1, 1], [0.5, 0.5]]) ➞ False
on_rectangle_bounds([[0, 1], [10, 0], [10, 1]]) ➞ True
on_rectangle_bounds([[0, 1]]) ➞ True
### Notes
Only rectangles with sides parallel to _x-axis_ and _y-axis_ will be
considered.
"""
def on_rectangle_bounds(dots):
    """Return True if every 2D point lies on the boundary of the axis-aligned
    bounding rectangle of the whole point set (sides parallel to the axes)."""
    # Per-axis (min, max) extremes; a point is on the boundary when its x
    # hits an x-extreme or its y hits a y-extreme.
    limits = [(min(coords), max(coords)) for coords in zip(*dots)]
    return all(pt[0] in limits[0] or pt[1] in limits[1] for pt in dots)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4397382269e0dec61503218124ea7997ba367489 | 0bcab6ea00eeb88516c218604d96c9cfc9ea5642 | /cs212-docp/07-18.py | 1a3dfb413c3c95e0814933cab7a4d0d8af72b932 | [] | no_license | alexchonglian/udacity | c11ffbbdbf19aa4247ec917ec51d3baab1411fa4 | cc3d990542fd1d14574ca42b64ebdd2d31f99d93 | refs/heads/master | 2022-08-28T17:17:10.428118 | 2020-09-10T02:12:40 | 2020-09-10T02:12:40 | 187,312,321 | 0 | 0 | null | 2022-07-29T23:11:54 | 2019-05-18T04:04:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,253 | py | # --------------
# User Instructions
#
# Fill out the function match(pattern, text), so that
# remainders is properly assigned.
def match(pattern, text):
    "Match pattern against start of text; return longest match found or None."
    remainders = pattern(text)
    if not remainders:
        return None
    # The shortest remainder corresponds to the longest consumed prefix.
    shortest = min(remainders, key=len)
    return text[:len(text) - len(shortest)]

# Failure result shared by all matchers: no possible remainders.
null = frozenset()

def lit(s):
    "Literal string matcher."
    return lambda t: set([t[len(s):]]) if t.startswith(s) else null

def seq(x, y):
    "Sequence: match x, then y on each remainder x leaves."
    def seq_xy(t):
        out = set()
        for rest in x(t):
            out |= y(rest)
        return out
    return seq_xy

def alt(x, y):
    "Alternation: union of the remainders of x and y."
    return lambda t: x(t) | y(t)

def oneof(chars):
    "Match any single character from chars."
    return lambda t: set([t[1:]]) if (t and t[0] in chars) else null

def dot(t):
    "Match any single character."
    return set([t[1:]]) if t else null

def eol(t):
    "Match only at end of text."
    return set(['']) if t == '' else null

def star(x):
    "Zero or more repetitions of x."
    def star_x(t):
        results = set([t])  # zero repetitions always succeed
        for t1 in x(t):
            if t1 != t:  # guard against zero-width matches looping forever
                results |= star_x(t1)
        return results
    return star_x

def test():
    assert match(star(lit('a')), 'aaaaabbbaa') == 'aaaaa'
    assert match(lit('hello'), 'hello how are you?') == 'hello'
    assert match(lit('x'), 'hello how are you?') == None
    assert match(oneof('xyz'), 'x**2 + y**2 = r**2') == 'x'
    assert match(oneof('xyz'), ' x is here!') == None
    return 'tests pass'
"alexchonglian@gmail.com"
] | alexchonglian@gmail.com |
f6449926a5b1dd8b1dc863a3cf3df671d50dc093 | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/scan/discuz/449.py | 79f9d31a89d3baa6200731d2dbeb750ee717ca15 | [] | no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#__author__ = 'darkkid'
# Name:Discuz! X3 tools
def assign(service, arg):
if service == "discuz":
return True, arg
def audit(arg):
payload = 'source/plugin/tools/tools.php'
verify_url = arg + payload
code, head, res, errcode, _ = curl.curl(verify_url)
if code == 200 and "Discuz" in res:
security_warning(verify_url + ' Discuz! X3 tools')
if __name__ == '__main__':
audit(assign('discuz', 'http://www.example.com/')[1]) | [
"zer0i3@aliyun.com"
] | zer0i3@aliyun.com |
3ec05c59ff77a33bf89d81b92c7d0dac1f34f4f0 | 22da4a564696d905bed0e4f21a1cb724fcadbbcf | /frappe/commands/scheduler.py | af994a5fc4d40ed500e53f051b472ea519e0b85c | [
"MIT"
] | permissive | ektai/erp2Dodock | b96512b112183a71d79c12513216b3fc6dd9293f | 5ad64b01cba9b07437f9a27751101258679379e8 | refs/heads/master | 2023-01-02T14:00:12.701247 | 2020-10-28T07:18:45 | 2020-10-28T07:18:45 | 305,179,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,466 | py | from __future__ import unicode_literals, absolute_import, print_function
import click
import sys
import frappe
from frappe.utils import cint
from frappe.commands import pass_context, get_site
def _is_scheduler_enabled():
    """Best-effort check of the "enable_scheduler" flag in System Settings.

    Returns False when the site database cannot be reached or the flag is
    unset; never raises (deliberately best-effort, used during CLI startup).
    """
    enable_scheduler = False
    try:
        frappe.connect()
        enable_scheduler = bool(cint(frappe.db.get_single_value("System Settings", "enable_scheduler")))
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; any other failure means "scheduler disabled".
        pass
    finally:
        frappe.db.close()
    return enable_scheduler
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
    "Trigger a scheduler event"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            # now=True runs the event immediately instead of enqueueing it
            # for its normal schedule.
            frappe.utils.scheduler.trigger(site, event, now=True)
        finally:
            frappe.destroy()
    if not context.sites:
        # NOTE(review): SiteNotSpecifiedError is not imported in this chunk;
        # presumably provided by a frappe import outside this view -- verify.
        raise SiteNotSpecifiedError

@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
    "Enable scheduler"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.utils.scheduler.enable_scheduler()
            # Persist the flag change in the site database.
            frappe.db.commit()
            print("Enabled for", site)
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError

@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
    "Disable scheduler"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.utils.scheduler.disable_scheduler()
            frappe.db.commit()
            print("Disabled for", site)
        finally:
            frappe.destroy()
    if not context.sites:
        raise SiteNotSpecifiedError
@click.command('scheduler')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['pause', 'resume', 'disable', 'enable']))
@pass_context
def scheduler(context, state, site=None):
    # pause/resume toggle a flag in the site config file (no DB needed);
    # disable/enable persist the scheduler state in the site database.
    from frappe.installer import update_site_config
    import frappe.utils.scheduler
    if not site:
        site = get_site(context)
    try:
        frappe.init(site=site)
        if state == 'pause':
            update_site_config('pause_scheduler', 1)
        elif state == 'resume':
            update_site_config('pause_scheduler', 0)
        elif state == 'disable':
            frappe.connect()
            frappe.utils.scheduler.disable_scheduler()
            frappe.db.commit()
        elif state == 'enable':
            frappe.connect()
            frappe.utils.scheduler.enable_scheduler()
            frappe.db.commit()
        print('Scheduler {0}d for site {1}'.format(state, site))
    finally:
        frappe.destroy()

@click.command('set-maintenance-mode')
@click.option('--site', help='site name')
@click.argument('state', type=click.Choice(['on', 'off']))
@pass_context
def set_maintenance_mode(context, state, site=None):
    # Flip the maintenance_mode flag in the site config file.
    from frappe.installer import update_site_config
    if not site:
        site = get_site(context)
    try:
        frappe.init(site=site)
        update_site_config('maintenance_mode', 1 if (state == 'on') else 0)
    finally:
        frappe.destroy()
@click.command('doctor') #Passing context always gets a site and if there is no use site it breaks
@click.option('--site', help='site name')
@pass_context
def doctor(context, site=None):
    "Get diagnostic info about background workers"
    from frappe.utils.doctor import doctor as _doctor
    if not site:
        # raise_err=False: doctor can run bench-wide without a current site.
        site = get_site(context, raise_err=False)
    return _doctor(site=site)

@click.command('show-pending-jobs')
@click.option('--site', help='site name')
@pass_context
def show_pending_jobs(context, site=None):
    "Get diagnostic info about background jobs"
    from frappe.utils.doctor import pending_jobs as _pending_jobs
    if not site:
        site = get_site(context)
    with frappe.init_site(site):
        pending_jobs = _pending_jobs(site=site)
    return pending_jobs

@click.command('purge-jobs')
@click.option('--site', help='site name')
@click.option('--queue', default=None, help='one of "low", "default", "high')
@click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"')
def purge_jobs(site=None, queue=None, event=None):
    "Purge any pending periodic tasks, if event option is not given, it will purge everything for the site"
    from frappe.utils.doctor import purge_pending_jobs
    frappe.init(site or '')
    count = purge_pending_jobs(event=event, site=site, queue=queue)
    print("Purged {} jobs".format(count))

@click.command('schedule')
def start_scheduler():
    # Long-running process: ticks the scheduler for all sites on this bench.
    from frappe.utils.scheduler import start_scheduler
    start_scheduler()

@click.command('worker')
@click.option('--queue', type=str)
@click.option('--quiet', is_flag = True, default = False, help = 'Hide Log Outputs')
def start_worker(queue, quiet = False):
    # Long-running background-job worker consuming the given queue.
    from frappe.utils.background_jobs import start_worker
    start_worker(queue, quiet = quiet)
@click.command('ready-for-migration')
@click.option('--site', help='site name')
@pass_context
def ready_for_migration(context, site=None):
    # Exits with status 1 while the site still has pending background jobs,
    # so deployment scripts can poll this before running migrations.
    from frappe.utils.doctor import get_pending_jobs
    if not site:
        site = get_site(context)
    try:
        frappe.init(site=site)
        pending_jobs = get_pending_jobs(site=site)
        if pending_jobs:
            print('NOT READY for migration: site {0} has pending background jobs'.format(site))
            sys.exit(1)
        else:
            print('READY for migration: site {0} does not have any background jobs'.format(site))
            return 0
    finally:
        frappe.destroy()

# Commands exported to the bench CLI.
commands = [
    disable_scheduler,
    doctor,
    enable_scheduler,
    purge_jobs,
    ready_for_migration,
    scheduler,
    set_maintenance_mode,
    show_pending_jobs,
    start_scheduler,
    start_worker,
    trigger_scheduler_event,
]
| [
"63931935+ektai@users.noreply.github.com"
] | 63931935+ektai@users.noreply.github.com |
b21593202db3ec2c9fca713de182051eab520cf7 | 01822d2ae38a95edcd188a51c377bb07b0a0c57d | /Notes/Sprint1/MaxMinInt.py | 05aac91498379a492ba180a67bfa4816f3851804 | [
"MIT"
] | permissive | mark-morelos/CS_Notes | bc298137971295023e5e3caf964fe7d3f8cf1af9 | 339c47ae5d7e678b7ac98d6d78857d016c611e38 | refs/heads/main | 2023-03-10T11:56:52.691282 | 2021-03-02T15:09:31 | 2021-03-02T15:09:31 | 338,211,631 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | """
Challenge #10:
Given a string of space separated integers, write a function that returns the
maximum and minimum integers.
Example:
- max_and_min("1 2 3 4 5") -> "5 1"
- max_and_min("1 2 -3 4 5") -> "5 -3"
- max_and_min("1 9 3 4 -5") -> "9 -5"
Notes:
- All inputs are valid integers.
- There will always be at least one number in the input string.
- The return string must be two numbers separated by a single space, and
the maximum number is first.
"""
# def max_and_min(input_str):
# Your code here
def csAntyhingButFive(start, end):
    """Return the half-open range [start, end); with end=5 the value 5 is excluded."""
    return range(start, end)


csAntyhingButFive(2, 5)
"makoimorelos@gmail.com"
] | makoimorelos@gmail.com |
e48e2e127a3980f24a9bca937240644815c580b0 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /cXfcK7iXpuZ67taSh_6.py | 4967e77250cf5ea5287e57b8266ffdaa52afb031 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | """
This is a **reverse coding challenge**. Normally you're given explicit
directions with how to create a function. Here, you must generate your own
function to satisfy the relationship between the inputs and outputs.
Your task is to create a function that, when fed the inputs below, produce the
sample outputs shown.
### Examples
"A4B5C2" ➞ "AAAABBBBBCC"
"C2F1E5" ➞ "CCFEEEEE"
"T4S2V2" ➞ "TTTTSSVV"
"A1B2C3D4" ➞ "ABBCCCDDDD"
### Notes
If you get stuck, check the **Comments** for help.
"""
def mystery_func(txt):
    """Decode a run-length string: 'A4B5C2' -> 'AAAABBBBBCC'.

    Even-indexed characters are the letters, odd-indexed characters are
    single-digit repeat counts; each letter is repeated count times.
    """
    pairs = zip(txt[::2], txt[1::2])
    return ''.join(letter * int(count) for letter, count in pairs)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
db7e523a58f687eac59bf3abe458af25e9ee7bb6 | 06f0ae3ecaaf47b1c23e231838afa524d8446f5e | /contest/migrations/0005_auto_20160321_2239.py | 116551554284e7daca17853af53c60b75e9ed8e3 | [] | no_license | nakamotohideyoshi/draftboard-web | c20a2a978add93268617b4547654b89eda11abfd | 4796fa9d88b56f80def011e2b043ce595bfce8c4 | refs/heads/master | 2022-12-15T06:18:24.926893 | 2017-09-17T12:40:03 | 2017-09-17T12:40:03 | 224,877,650 | 0 | 0 | null | 2022-12-08T00:02:57 | 2019-11-29T15:20:17 | Python | UTF-8 | Python | false | false | 3,517 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Replace the Pool model with ContestPool.

    Creates the ContestPool table, drops Pool's foreign keys and the Pool
    model itself, then adds the CurrentContestPool proxy model over
    contest.contestpool.
    """

    dependencies = [
        ('prize', '0006_auto_20160209_2241'),
        ('draftgroup', '0018_draftgroup_fantasy_points_finalized'),
        ('sports', '0001_squashed_0008_auto_20160119_2124'),
        ('contest', '0004_auto_20160321_2142'),
    ]

    operations = [
        migrations.CreateModel(
            name='ContestPool',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('cid', models.CharField(editable=False, blank=True, max_length=6, help_text='unique, randomly chosen when Contest is created', default='')),
                ('name', models.CharField(default='', max_length=64, help_text='The front-end name of the Contest', verbose_name='Public Name')),
                ('start', models.DateTimeField(help_text='the start should coincide with the start of a real-life game.', verbose_name='Start Time')),
                ('end', models.DateTimeField(blank=True, help_text='forces the end time of the contest (will override "Ends tonight" checkbox!!', verbose_name='Cutoff Time')),
                ('max_entries', models.PositiveIntegerField(default=1, help_text='USER entry limit')),
                ('entries', models.PositiveIntegerField(default=2, help_text='CONTEST limit')),
                ('current_entries', models.PositiveIntegerField(default=0, help_text='The current # of entries in the contest')),
                ('gpp', models.BooleanField(default=False, help_text='a gpp Contest will not be cancelled if it does not fill')),
                ('respawn', models.BooleanField(default=False, help_text='indicates whether a new identical Contest should be created when this one fills up')),
                ('doubleup', models.BooleanField(default=False, help_text='whether this contest has a double-up style prize structure')),
                ('status', models.CharField(choices=[('Scheduled', (('scheduled', 'Scheduled'),)), ('Created', (('created', 'Created'),))], default='scheduled', max_length=32)),
                ('draft_group', models.ForeignKey(blank=True, to='draftgroup.DraftGroup', help_text='the pool of draftable players and their salaries, for the games this contest includes.', null=True, verbose_name='DraftGroup')),
                ('prize_structure', models.ForeignKey(to='prize.PrizeStructure')),
                ('site_sport', models.ForeignKey(to='sports.SiteSport', related_name='contest_contestpool_site_sport')),
            ],
            options={
                'abstract': False,
                'verbose_name_plural': 'Contest Pools',
                'verbose_name': 'Contest Pools',
            },
        ),
        # Remove Pool's foreign keys before deleting the model itself.
        migrations.RemoveField(
            model_name='pool',
            name='draft_group',
        ),
        migrations.RemoveField(
            model_name='pool',
            name='prize_structure',
        ),
        migrations.RemoveField(
            model_name='pool',
            name='site_sport',
        ),
        migrations.DeleteModel(
            name='Pool',
        ),
        # Proxy model: no table of its own, backed by contest.contestpool.
        migrations.CreateModel(
            name='CurrentContestPool',
            fields=[
            ],
            options={
                'proxy': True,
            },
            bases=('contest.contestpool',),
        ),
    ]
| [
"cbanister@coderden.com"
] | cbanister@coderden.com |
48d33bfa29646585b6db9ca254362c02f141d4cc | e02405f3db787275545f87a23bf7eb3510ddd2f4 | /test_main.py | 4e68d61ebcec6bfcbe8da0934a8fedd93d0a5e41 | [] | no_license | kevlab/flasktaskr_project | a51ca6d62b080fb1fdbe0fb34ad5d7cff36ba329 | 3a3ba1dfbe2571deb7958970d52437abb03b03e7 | refs/heads/master | 2021-01-17T17:07:33.064404 | 2015-06-07T14:38:25 | 2015-06-07T14:38:25 | 33,480,265 | 0 | 0 | null | 2015-04-06T13:25:34 | 2015-04-06T12:10:55 | Python | UTF-8 | Python | false | false | 1,633 | py | import os
import unittest
from project import app, db
from config import basedir
from project.models import User, Task
TEST_DB = 'test.db'
class Alltests(unittest.TestCase):
    """Integration tests for the Flask app, backed by a throwaway SQLite file."""

    def setUp(self):
        # Test-mode configuration: CSRF disabled so forms can be posted
        # directly, and a dedicated SQLite database under basedir.
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['DEBUG'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
            os.path.join(basedir, TEST_DB)
        self.app = app.test_client()
        db.create_all()

    def tearDown(self):
        # Drop every table so each test starts from an empty schema.
        db.drop_all()

    def login(self, name, password):
        """Post credentials to the users endpoint, following redirects."""
        return self.app.post('users/', data=dict(name=name, password=password),
                             follow_redirects=True)

    def test_404_error(self):
        response = self.app.get('/not-actually-a-route/')
        self.assertEqual(response.status_code, 404)
        self.assertIn('Sorry. There\'s nothing here.', response.data)

    # NOTE(review): disabled test kept for reference — it exercised the 500
    # handler by logging in a user stored with an invalid password hash.
    # def test_500_error(self):
    #     bad_user = User(name='baduser',
    #                     email='baduser@gmail.com',
    #                     password='django')
    #     db.session.add(bad_user)
    #     db.session.commit()
    #     response = self.login('baduser', 'django')
    #     self.assertEqual(response.status_code, 500)
    #     self.assertIn('Something went terribly wrong.', response.data)
    #     self.assertNotIn('ValueError: Invalid salt', response.data)

    def test_index(self):
        """ Ensure flask was set up properly """
        # 302: the index issues a redirect rather than rendering directly.
        response = self.app.get('/', content_type='html/text')
        self.assertEqual(response.status_code, 302)
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly
| [
"greenleaf1348@gmail.com"
] | greenleaf1348@gmail.com |
ed9bd818265cb5b56fa0412338d0e74acfdcfc60 | fd390bfa1f471d09cafb72ad85e5143214abf32d | /shakecast/app/orm/migrations.py | 467a024c4ef7ab9b73b5df4072e90f4cf3b5c714 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | usgs/shakecast | 0ac6ac955aaff3029b133c4ce9264adc90004d86 | e287d697d93467e5e25c99d27b70754a0a5e6e39 | refs/heads/master | 2023-05-13T09:22:13.001195 | 2022-03-08T03:26:18 | 2022-03-08T03:26:18 | 52,902,913 | 9 | 24 | NOASSERTION | 2023-05-01T22:45:23 | 2016-03-01T19:15:42 | Python | UTF-8 | Python | false | false | 5,320 | py | from sqlalchemy import Column, Integer, String, Float, PrimaryKeyConstraint
############### DB Migrations ################
def migrate_1to2(engine):
    '''
    Schema v1 -> v2: add ``updated`` and ``updated_by`` audit columns to the
    user, group and facility tables so inventory edits can be attributed.
    Failures (e.g. column already exists) are ignored, matching the
    best-effort behaviour of the other migrations.
    '''
    for table in ('user', 'group', 'facility'):
        for audit_column in (Column('updated', Integer),
                             Column('updated_by', String)):
            try:
                add_column(engine, table, audit_column)
            except Exception:
                pass
    return engine
def migrate_2to3(engine):
    '''
    Schema v2 -> v3: add an ``mms`` column to the user table.
    (The previous docstring was copy-pasted from migrate_1to2 and was wrong.)
    '''
    mms = Column('mms', String(255))
    try:
        add_column(engine, 'user', mms)
    except Exception:
        pass
    return engine
def migrate_3to4(engine):
    '''
    Schema v3 -> v4: add an ``aebm`` column to the facility_shaking table.
    (The previous docstring was copy-pasted from migrate_1to2 and was wrong.)
    '''
    aebm = Column('aebm', String(50))
    try:
        add_column(engine, 'facility_shaking', aebm)
    except Exception:
        pass
    return engine
def migrate_4to5(engine):
    '''
    Schema v4 -> v5: add a ``sent_timestamp`` column to the notification table.
    '''
    sent_timestamp = Column('sent_timestamp', Float)
    try:
        add_column(engine, 'notification', sent_timestamp)
    except Exception:
        pass
    return engine
def migrate_5to6(engine):
    '''
    Schema v5 -> v6: add a ``type`` column to the shakemap and event tables.
    '''
    type_ = Column('type', String(64))
    try:
        add_column(engine, 'shakemap', type_)
    except Exception:
        pass
    try:
        add_column(engine, 'event', type_)
    except Exception:
        pass
    return engine
def migrate_6to7(engine):
    '''
    Schema v6 -> v7: add an ``epicentral_distance`` column to facility_shaking.
    '''
    epicentral_distance = Column('epicentral_distance', String(64))
    try:
        add_column(engine, 'facility_shaking', epicentral_distance)
    except Exception:
        pass
    return engine
def migrate_7to8(engine):
    '''
    Schema v7 -> v8: add an ``override_directory`` column to the shakemap and
    event tables.
    '''
    override_directory = Column('override_directory', String(255))
    try:
        add_column(engine, 'shakemap', override_directory)
    except Exception:
        pass
    try:
        add_column(engine, 'event', override_directory)
    except Exception:
        pass
    return engine
def migrate_8to9(engine):
    '''
    Schema v8 -> v9: add a ``product_string`` column to the group table.
    '''
    product_string = Column('product_string', String(255))
    try:
        add_column(engine, 'group', product_string)
    except Exception:
        pass
    return engine
def migrate_9to10(engine):
    '''
    Schema v9 -> v10: add a ``generated_timestamp`` column to the
    notification table.
    '''
    generated_timestamp = Column('generated_timestamp', Float)
    try:
        add_column(engine, 'notification', generated_timestamp)
    except Exception:
        pass
    return engine
def migrate_10to11(engine):
    '''
    Schema v10 -> v11: rework local-product tracking and add a notification
    error column.
    '''
    file_name = Column('file_name', String)
    try:
        add_column(engine, 'local_product_types', file_name)
    except Exception:
        pass
    name = Column('name', String)
    try:
        add_column(engine, 'local_product_types', name)
    except Exception:
        pass
    # NOTE(review): the table the two columns were just added to is dropped
    # here — presumably so it gets recreated with the new schema elsewhere;
    # confirm intent before changing the ordering.
    try:
        engine.execute('drop table local_product_types')
    except Exception:
        pass
    begin_timestamp = Column('begin_timestamp', Float)
    try:
        add_column(engine, 'local_products', begin_timestamp)
    except Exception:
        pass
    finish_timestamp = Column('finish_timestamp', Float, default=0)
    try:
        add_column(engine, 'local_products', finish_timestamp)
    except Exception:
        pass
    error = Column('error', String(255))
    try:
        add_column(engine, 'notification', error)
    except Exception:
        pass
    return engine
def migrate_11to12(engine):
    '''
    Schema v11 -> v12: add an ``updated`` column to the event table.
    '''
    update = Column('updated', Integer)
    try:
        add_column(engine, 'event', update)
    except Exception:
        pass
    return engine
def migrate_12to13(engine):
    '''
    Schema v12 -> v13: add ``dependencies`` to local_product_types and a
    ``tries`` counter to local_products.
    '''
    dependencies = Column('dependencies', String)
    tries = Column('tries', Integer, default=0)
    try:
        add_column(engine, 'local_product_types', dependencies)
    except Exception:
        pass
    try:
        add_column(engine, 'local_products', tries)
    except Exception:
        pass
    return engine
def add_column(engine, table_name, column):
    '''
    Add a column to an existing table.

    The column name and type are rendered for the engine's SQL dialect.
    Only sqlite and mysql are handled (identifier quoting differs between
    them); any other dialect is silently a no-op.
    '''
    column_name = column.compile(dialect=engine.dialect)
    column_type = column.type.compile(engine.dialect)
    if 'sqlite' in str(engine):
        engine.execute('ALTER TABLE "%s" ADD COLUMN %s %s' % (table_name, column_name, column_type))
    elif 'mysql' in str(engine):
        engine.execute('ALTER TABLE `%s` ADD COLUMN %s %s' % (table_name, column_name, column_type))
#######################################################################
# List of database migrations for export.
# Order matters: migrate() applies them sequentially, v1 upward.
migrations = [migrate_1to2, migrate_2to3, migrate_3to4, migrate_4to5,
    migrate_5to6, migrate_6to7, migrate_7to8, migrate_8to9, migrate_9to10,
    migrate_10to11, migrate_11to12, migrate_12to13]
def migrate(engine):
    '''
    Apply every registered migration to the engine, in order, and return
    the (possibly re-bound) engine.
    '''
    for migration_step in migrations:
        engine = migration_step(engine)
    return engine
| [
"dslosky@usgs.gov"
] | dslosky@usgs.gov |
5a3513f9f209d1ef28d755df10e6aa2cfc4607aa | 6f2d5600b65b062151bab88c592796b878de7465 | /InterfaceTest/common/do_mysql.py | 5eac443a50281fb43beeeb98044f8353bb135ad0 | [] | no_license | zhouyanmeng/python_api_test | 1e6549321c20ee9a71beffac2533c917b5ecc157 | 7303352c9b5baacba5296b088f89ba4c702fb485 | refs/heads/master | 2022-12-17T14:34:26.351566 | 2019-03-01T13:02:06 | 2019-03-01T13:02:06 | 185,185,856 | 0 | 0 | null | 2022-12-08T01:45:15 | 2019-05-06T11:45:55 | Python | UTF-8 | Python | false | false | 1,038 | py | import pymysql
class DoMysql:
    """Minimal helper around a pymysql connection.

    The connection is opened when the object is created; rows come back as
    dicts keyed by column name. Call close() when finished.
    """

    def __init__(self):
        # Connection settings for the test database.
        host = "test.lemonban.com"
        user = "test"
        password = "test"
        port = 3306
        self.mysql = pymysql.connect(host=host, user=user,
                                     password=password, port=port)
        # DictCursor: every fetched row is a dict keyed by column name.
        self.cursor = self.mysql.cursor(pymysql.cursors.DictCursor)

    def fetch_one(self, sql):
        """Execute *sql* and return the first row (a dict), or None."""
        self.cursor.execute(sql)
        # Commit so repeated reads see data committed by other sessions.
        self.mysql.commit()
        return self.cursor.fetchone()

    def fetch_all(self, sql):
        """Execute *sql* and return all rows as a list of dicts."""
        # fix: was ``self.cursor.executee(sql)`` — an AttributeError on every
        # call. Also commit, for consistency with fetch_one.
        self.cursor.execute(sql)
        self.mysql.commit()
        return self.cursor.fetchall()

    def close(self):
        """Release the cursor, then the underlying connection."""
        self.cursor.close()
        self.mysql.close()
if __name__ == '__main__':
    mysql = DoMysql()  # instantiating opens the connection
    # mysql.connect()
    result = mysql.fetch_one('select max(mobilephone) from future.member')  # run the query
    print(result)
    mysql.close()
"2440269710@qq.com"
] | 2440269710@qq.com |
6deb89e092b5b74f23029184b835d8e808de678a | 3b81dfbacf97918d36fb5accbcef0b610378e1a8 | /data-struct/probelm/02-second-try.py | 3a304359cfc1762c450a8f2640ca20afd38eef61 | [] | no_license | XiaoFei-97/the-way-to-python | 11706f0845f56246ba8ea0df8ff34e622bbdad2d | 3667a24f4f4238998e9c6ed42cdc49c68881a529 | refs/heads/master | 2020-03-21T06:46:36.939073 | 2018-06-23T03:51:11 | 2018-06-23T03:51:11 | 138,241,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | #如果a+b+c=1000,且a^2+b^2=c^2(a,b,c为自然数),求出abc所以可能的组合
#运行时间1秒
#
#a = 0
#b = 0
#c = 0
#
import time
#
#start_time = time.time()
#for a in range(1000):
# for b in range(1000):
# for c in range(1000):
# if a+b+c == 1000 and a**2 +b**2 == c**2:
# print("a=%d,b=%d,c=%d"%(a,b,c))
#end_time = time.time()
#
#print("times:%d"%(end_time-start_time))
#T=1000 * 1000 * 1000 * 2(此处细化是10步)
#时间复杂度T(n) = n^3 * 2
#大O表示法:g(n)=n^3
start_time = time.time()
for a in range(1001):
for b in range(1001-a):
c = 1000-a-b
if a**2 +b**2 == c**2:
print("a=%d,b=%d,c=%d"%(a,b,c))
#T= 1000 * 1000 * 3
#时间复杂度T(n) = n^2 *3
#大O表示法:g(n)=n^2
end_time = time.time()
print("times:%d"%(end_time-start_time))
| [
"jack_970124@163.com"
] | jack_970124@163.com |
f41ff08bfd79dc9007fd2de6edc00cb85adf391a | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /src/transformers/models/clipseg/__init__.py | 0e2e250e507a811c0f1cbbf45dabf236e1721e4a | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 2,179 | py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Submodule name -> names it exports; consumed by _LazyModule below so heavy
# submodules are only imported on first attribute access.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

# The modeling classes need torch; only register them when it is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

# Static type checkers take the eager-import branch (TYPE_CHECKING is False
# at runtime); at runtime the module is swapped for a lazy proxy instead.
if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Replace this module object with a _LazyModule that resolves the names
    # in _import_structure on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| [
"noreply@github.com"
] | huggingface.noreply@github.com |
83003f3e74b140fc689183f6eeb5e495015a3296 | 4e67c2edd71493a98a3f13e5b2073c1d05b1b656 | /Semestre 02/ProjetoIntegrador2/Aula 09.24.2020/aluno_get_set.py | 1569fbeb2a3d0bad8480b29df252044a40f1b2c6 | [] | no_license | felipellima83/UniCEUB | 05991d7a02b13cd4e236f3be3a34726af2dc1504 | dbc44866545b5247d1b5f76ec6e9b7778e54093e | refs/heads/master | 2023-07-08T19:04:19.830473 | 2021-08-12T12:33:49 | 2021-08-12T12:33:49 | 249,958,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,714 | py | ''' UniCEUB - Ciência da Computação - Prof. Barbosa
Atalho de teclado: ctlr <d>, duplica linha. ctrl <y>, apaga linha. ctrl </>, comenta linha
Sintaxe: POO em Python com gets e sets
class NomeClasse (object):
def __init__ (self, p_1, p_2, ...): # Método construtor
self.nome_atributo1 = p_1 # Atributos
self.nome_atributo2 = p_2
...
def get_nome_atributo1 (self): # Modelo do método get (retorna valor do atributo)
return self.nome_atributo1
def set_nome_atributo1 (self, valor): # Modelo do método set (altera valor do atributo)
self.nome_atributo1 = valor
def outro_metodo (self): # Outros métodos da classe (métodos fundionais)
...
[return ...]
if __name__ == '__main__': # mai <tab>
nome_objeto1 = NomeClasse(a_1, a_2, ...) # Cria (instancia) o primeiro objeto da classe
nome_objeto2 = NomeClasse(a_1, a_2, ...) # Cria (instancia) o segundo objeto da classe
. . .
r = nome_objeto1.get_nome_atributo1() # Consulta um atributo
print(r)
nome_objeto1.set_nome_atributo1(novo_valor) # Altera um atributo
------------------------------- Com base no modelo acima, implemente estes itens:
1- Crie a classe Aluno.
- Crie o construtor da classe com os atributos: nome, mensalidade, idade
- Crie os métodos gets e sets
- No main, crie pelo menos dois objetos da classe Aluno. Teste
5- Use os métodos gets e sets para os objetos criados
- Crie o método mostra_dados. Mostra os dados (atrigutos) dentro do método. Teste.
- Refaça o anterior sem usar o nome do atributo, crie o método mostra_dados_2. Teste
- Crie o método retorna_dados, retorne todos os dados (atributos) concatenados.Teste
- Crie o método aumento_mensalidade_valor, ele recebe o valor do aumento. Teste
10- Crie o método aumento_mensalidade_porcentagem (Recebe: 10%, 15% etc.). Teste
---
11- Altere o construtor com estes valor default: Mensalidade = 1000 e idade = 0.
- No main, crie o objeto aluno3 da classe Aluno passando apenas o nome. Teste
- No main, crie o objeto aluno4 da classe Aluno passando o nome e a mensalidade. Teste
14- No main, crie o objeto aluno5 da classe Aluno passando somente o nome e a idade.
não passe o argumento mensalidade. Teste ----- '''
class Aluno(object):
    """A student record with name (nome), tuition (mensalidade) and age (idade).

    Teaching example: explicit getter/setter methods plus a few
    behaviour methods for displaying and adjusting the tuition.
    """

    def __init__(self, nome, mensalidade=1000, idade=0):
        self.nome = nome
        self.mensalidade = mensalidade
        self.idade = idade

    # ------------------------------------------------------- accessors
    def get_nome(self):
        return self.nome

    def set_nome(self, nome):
        self.nome = nome

    def get_mensalidade(self):
        return self.mensalidade

    def set_mensalidade(self, valor):
        self.mensalidade = valor

    def get_idade(self):
        return self.idade

    def set_idade(self, idade):
        self.idade = idade

    # ------------------------------------------------------- behaviour
    def mostra_dados(self):
        """Print the attributes, reading them directly."""
        print('Nome: ', self.nome)
        print('Mensalidade: ', self.mensalidade)
        print('Idade: ', self.idade)

    def mostra_dados_2(self):
        """Print the attributes, reading them through the getters."""
        print('Nome: ', self.get_nome())
        print('Mensalidade: ', self.get_mensalidade())
        print('Idade: ', self.get_idade())

    def retorna_dados(self):
        """Return all attributes joined as 'nome - mensalidade - idade'."""
        return '{} - {} - {}'.format(self.nome, self.mensalidade, self.idade)

    def aumento_mensalidade_valor(self, valor):
        """Raise the tuition by an absolute amount."""
        self.mensalidade = self.mensalidade + valor

    def aumento_mensalidade_porcentagem(self, pct):
        """Raise the tuition by a percentage (pct=10 means +10%)."""
        increase = self.mensalidade * pct / 100
        self.mensalidade += increase
if __name__ == '__main__':  # editor shortcut: main <tab>
    aluno1 = Aluno('Paulo', 1000, 21)  # calls the __init__ constructor
    aluno2 = Aluno('Carla', 900, 20)
    print("Aluno 1:")
    print("Nome: ", aluno1.get_nome())  # object.method()
    print("Mensalidade: ", aluno1.get_mensalidade())
    print("Idade: ", aluno1.get_idade())
    print("Aluno 2:")
    print("Nome: ", aluno2.get_nome())
    print("Mensalidade: ", aluno2.get_mensalidade())
    print("Idade: ", aluno2.get_idade())
    novo_nome = input("Novo nome: ")  # option 1: read the new name from stdin
    aluno1.set_nome(novo_nome)
    aluno2.set_nome("João")  # option 2: hard-coded value
    aluno1.mostra_dados()
    aluno2.mostra_dados()
    aluno1.mostra_dados_2()
    aluno2.mostra_dados_2()
    print('Dados concatenados: ', aluno1.retorna_dados())
    print('Dados concatenados: ', aluno2.retorna_dados())
    aluno1.aumento_mensalidade_valor(110)
    print('Nova mensalidade', aluno1.get_mensalidade())
    aluno1.mostra_dados_2()
    aluno2.aumento_mensalidade_porcentagem(10)
    aluno2.mostra_dados_2()
    print('Nova mensalidade', aluno2.get_mensalidade())
    aluno3 = Aluno('Ailton')  # only the name; defaults fill the rest
    aluno3.mostra_dados()
    aluno4 = Aluno('Ana', 800)  # name and tuition; age keeps its default
    aluno4.mostra_dados()
    aluno5 = Aluno('Rogério', idade=31)  # keyword arg; mensalidade stays default
    aluno5.mostra_dados()
    aluno6 = Aluno(idade=30, nome='Vinicius')  # keyword args out of order
    aluno6.mostra_dados()
"felipellima83@gmail.com"
] | felipellima83@gmail.com |
6889f8b569293a0d5a19b7bd90753cd841b53e2d | 5b323fd78bb143667aedd64bc5ce17bc90f82370 | /量化投资书/量化投资以Python为工具/ch14/02.py | cc38019e0cd7e0daf38ee2a7614cb501b91e8e74 | [] | no_license | xiaotfeng/Some-thing-interesting-for-me | 3cbd3c70d0631b5687d5a55cac33bbfc7e4044c0 | 4fdb63376fa421f2aa17e3246a44454f62ca342e | refs/heads/master | 2023-02-04T22:04:03.210810 | 2020-12-29T05:36:37 | 2020-12-29T05:36:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import numpy as np
import scipy.stats
import pandas as pd
# Draw samples: 20 values from Binomial(100, 0.5) and 3 from Binomial(10, 0.3).
np.random.binomial(100, 0.5, 20)
np.random.binomial(10, 0.3, 3)
# Point probabilities P(X = 20) and P(X = 50) for X ~ Binomial(100, 0.5).
scipy.stats.binom.pmf(20, 100, 0.5)
scipy.stats.binom.pmf(50, 100, 0.5)
# P(X <= 20) via the summed pmf over 0..20 ...
dd = scipy.stats.binom.pmf(np.arange(0, 21, 1), 100, 0.5)
dd.sum()
# ... which matches the cdf evaluated at 20.
scipy.stats.binom.cdf(20, 100, 0.5)
# Second column of the CSV — presumably daily index returns (HS300); confirm.
HSRet300 = pd.read_csv('return300.csv')
ret = HSRet300.iloc[:, 1]
print(ret.head(3))
# Empirical probability of a positive-return day.
p = len(ret[ret > 0]) / len(ret)
print(p)
# P(exactly 6 positive days out of 10) with success probability p.
prob = scipy.stats.binom.pmf(6, 10, p)
print(prob)
"395871987@qq.com"
] | 395871987@qq.com |
c43d097db7860899069dbeea9d5aab443e90d32d | 4a48593a04284ef997f377abee8db61d6332c322 | /python/scipy/convolution.py | a36f5bc1c66a64d0984fd54a799186fe02f633df | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 286 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
# The input signal x: a rectangular pulse over samples 30..59.
x = np.zeros(100)
x[30:60] = 1.

# Convolution kernel: a 30-sample boxcar window.
y = np.ones(30)
xc = scipy.signal.convolve(x, y)

# Plot the pulse and the convolution divided by the kernel length,
# i.e. a moving average of x.
plt.plot(x)
plt.plot(xc / 30.)
plt.show()
| [
"jd.jdhp@gmail.com"
] | jd.jdhp@gmail.com |
e34519fbbd240b89574559ceabb90d807dc27c96 | 67309cbca4ead3623c86647ac7bfaa067b029fdc | /SWEA/Tree/5176_이진탐색.py | f1a4a1749e5c902773c5aafb4225cbc310b9161b | [] | no_license | Jeukoh/OJ | b6df132927ec15ab816fee8681952240b5a69e13 | 182b54554896d9860d5e5d09f8eccc07d99aa8e8 | refs/heads/master | 2023-08-30T19:18:10.812225 | 2021-10-15T09:57:14 | 2021-10-15T09:57:14 | 402,799,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import math
def inserttree(idx):
    """In-order fill of a 1-indexed complete binary tree stored in an array.

    Recurses into the left child (2*idx), assigns the next value from the
    global counter ``cnt`` to node ``idx``, then recurses into the right
    child (2*idx+1). Relies on module-level globals V (node count), tree
    (the array) and cnt (next value to place).
    """
    global cnt
    if idx <= V:
        inserttree(2 * idx)
        tree[idx] = cnt
        cnt += 1
        inserttree(2 * idx + 1)
for tc in range(1, int(input()) + 1):
    V = int(input().rstrip())            # number of nodes in this test case
    tree = [[] for _ in range(V + 1)]    # 1-indexed; slot 0 is unused
    cnt = 1                              # next value for inserttree (global)
    inserttree(1)
    # Report the values at the root and at node V//2.
    print(f'#{tc}', tree[1], tree[V//2])
"jeukoh@gmail.com"
] | jeukoh@gmail.com |
d81c9a6c45e667fc3c2e7e5eeb80d0aeb2926124 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /log-20190927/132.230.102.123-10.21.11.31/1569571753.py | b4d079745312f44675d25424b0cf91732161be53 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def mysum(xs: list) -> int:
    """Return the sum of the numbers in *xs* (0 for an empty list)."""
    # fix: the parameter was named ``zs`` while the body summed ``xs``,
    # raising NameError on every call; the hidden grader below also
    # requires the parameter to be named ``xs``.
    return sum(xs)
## Lösung Teil 2. (Tests)
def test_2():
    """Visible sanity test: mysum must sum 1 + 2 + 3 to 6."""
    assert mysum([1, 2, 3]) == 6
######################################################################
## hidden tests
# Run the visible tests immediately at import time.
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])

from inspect import getfullargspec

# Parameter names of mysum, inspected by the grader classes below.
mysumargs = getfullargspec(mysum).args
class TestName:
    """Grader check: mysum exists and its parameter is named xs."""

    def test_mysum(self):
        assert mysum
        assert 'xs' in mysumargs
class TestGrades:
    """Grading hooks; the hard-coded True/False asserts are flags filled in
    by the grader, not real assertions."""

    def test_docstring_present(self):
        # grading flag: docstring requirement not met in the submission
        assert False

    def test_typing_present(self):
        # grading flag: type annotations requirement met
        assert True

    def test_coverage(self):
        # grading flag: coverage requirement not met
        assert False

    def sum_oracle(self, xs: list) -> int:
        """Reference implementation mysum is compared against."""
        return sum(xs)

    def check_sum(self, xs):
        assert mysum(xs) == self.sum_oracle(xs)

    def test_correctness(self):
        # Compare mysum with the oracle on 100 random short lists.
        for i in range(100):
            l = random.randrange(6)
            xs = [random.randrange(10) for z in range(l)]
            self.check_sum(xs)
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
d87f5492302e50c6f0e7424b27dbb5ec6a5e1d99 | e972d1a5eaf1e82425d0f2ef43147b1a9e817198 | /Examples/Howto/UseFonts/HelloCircleSquare.py | 3157371a905aae8164269e23f04c9e5026cebb41 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | 13-Graphics/PageBot | 86b2ba5f051b2ffaa16ee26da4747b790481ec46 | 8d815cf3fa20eb891d0e5c11253376dbe77b71e6 | refs/heads/master | 2021-09-06T21:36:58.986635 | 2018-02-11T22:21:12 | 2018-02-11T22:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting usage of DrawBot, www.drawbot.com
# Supporting usage of Flat, https://github.com/xxyxyz/flat
# -----------------------------------------------------------------------------
#
from random import random
from pagebot.contexts import defaultContext as c
for p in range(20):
c.newPage(1000, 1000)
for n in range(50):
c.fill(random(), 0, random(), 0.5 + random()*0.2)
ch = random()
x = 20 + random()*800
y = 20 + random()*800
if ch < 0.2:
c.oval(x, y, 80, 80 )
elif ch < 0.4:
c.rect(x, y, 80, 80 )
else:
fs = c.newString('Hello world on %d,%d' % (x, y),
style=dict(fontSize=24))
c.text(fs, (x, y))
c.saveImage('_export/OurNiceDrawing.pdf')
| [
"fsanches@metamaquina.com.br"
] | fsanches@metamaquina.com.br |
27a23d8d90b5c1ff97590888e4cb48d90f8da8d7 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/hamming/b238090e4c274187a6c3f8eafb85f032.py | 151f1fa976cb89eac830a5420ad3574fe92a6af1 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 130 | py | def distance(strand1, strand2):
dist = 0
for i in range(len(strand1)):
if strand1[i] != strand2[i]:
dist += 1
return dist
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
dcf702d039bf020112bc112189290a6a56115097 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/insights/v20160301/get_alert_rule.py | e2bb242205dcdb83a87e1935f3e3f51f683543d5 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 5,814 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAlertRuleResult',
'AwaitableGetAlertRuleResult',
'get_alert_rule',
]
@pulumi.output_type
class GetAlertRuleResult:
"""
The alert rule resource.
"""
def __init__(__self__, actions=None, condition=None, description=None, is_enabled=None, last_updated_time=None, location=None, name=None, tags=None, type=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if condition and not isinstance(condition, dict):
raise TypeError("Expected argument 'condition' to be a dict")
pulumi.set(__self__, "condition", condition)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if is_enabled and not isinstance(is_enabled, bool):
raise TypeError("Expected argument 'is_enabled' to be a bool")
pulumi.set(__self__, "is_enabled", is_enabled)
if last_updated_time and not isinstance(last_updated_time, str):
raise TypeError("Expected argument 'last_updated_time' to be a str")
pulumi.set(__self__, "last_updated_time", last_updated_time)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> Optional[Sequence[Any]]:
"""
the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def condition(self) -> Any:
"""
the condition that results in the alert rule being activated.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
the description of the alert rule that will be included in the alert email.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> bool:
"""
the flag that indicates whether the alert rule is enabled.
"""
return pulumi.get(self, "is_enabled")
@property
@pulumi.getter(name="lastUpdatedTime")
def last_updated_time(self) -> str:
"""
Last time the rule was updated in ISO8601 format.
"""
return pulumi.get(self, "last_updated_time")
    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Azure resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Azure resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetAlertRuleResult(GetAlertRuleResult):
    """Awaitable wrapper so a ``get_alert_rule`` result can also be ``await``-ed.

    The result is already resolved when this object is constructed, so
    awaiting it completes immediately with a plain ``GetAlertRuleResult`` copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which
        # is what makes the object awaitable; the branch never executes.
        if False:
            yield self
        return GetAlertRuleResult(
            actions=self.actions,
            condition=self.condition,
            description=self.description,
            is_enabled=self.is_enabled,
            last_updated_time=self.last_updated_time,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def get_alert_rule(resource_group_name: Optional[str] = None,
                   rule_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAlertRuleResult:
    """
    Use this data source to access information about an existing alert rule
    (provider function ``azure-nextgen:insights/v20160301:getAlertRule``).
    :param str resource_group_name: The name of the resource group.
    :param str rule_name: The name of the rule.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['ruleName'] = rule_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; ``.value`` unwraps the raw result object.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:insights/v20160301:getAlertRule', __args__, opts=opts, typ=GetAlertRuleResult).value
    return AwaitableGetAlertRuleResult(
        actions=__ret__.actions,
        condition=__ret__.condition,
        description=__ret__.description,
        is_enabled=__ret__.is_enabled,
        last_updated_time=__ret__.last_updated_time,
        location=__ret__.location,
        name=__ret__.name,
        tags=__ret__.tags,
        type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
6d3b4f5751fa19cc815f562e1a5788314b5b42fa | 940d7b93fb27e8eead9b6e52bc5c7444666744dd | /python/src/Lib/test/test_sunaudiodev.py | 2c5e7d4a6e0e0efd99a539e114dcbb37ed1addd3 | [
"Apache-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi"
] | permissive | pilotx45/sl4a | d446531d310cc17d93f24aab7271a0813e8f628d | 150e3e46b5103a9b9a391034ef3fbc5bd5160d0f | refs/heads/master | 2022-03-24T19:48:30.340479 | 2022-03-08T16:23:58 | 2022-03-08T16:23:58 | 277,016,574 | 1 | 0 | Apache-2.0 | 2022-03-08T16:23:59 | 2020-07-04T01:25:36 | null | UTF-8 | Python | false | false | 678 | py | from test.test_support import findfile, TestFailed, TestSkipped, import_module
# Import the deprecated module through the test-support helper so the
# deprecation warning machinery is handled for us.
sunaudiodev = import_module('sunaudiodev', deprecated=True)
import os
# Pick the audio device: honour $AUDIODEV when set, else fall back to the
# conventional /dev/audio path; skip the whole test if neither exists.
try:
    audiodev = os.environ["AUDIODEV"]
except KeyError:
    audiodev = "/dev/audio"
if not os.path.exists(audiodev):
    raise TestSkipped("no audio device found!")
def play_sound_file(path):
    """Write the raw contents of *path* to the Sun audio device.

    NOTE: uses Python 2 ``except ..., msg`` / ``raise ..., msg`` syntax; this
    file is not valid Python 3.
    """
    fp = open(path, 'r')
    data = fp.read()
    fp.close()
    try:
        a = sunaudiodev.open('w')
    except sunaudiodev.error, msg:
        raise TestFailed, msg
    else:
        # Device opened successfully: play and release it.
        a.write(data)
        a.close()
def test_main():
    """Entry point used by the regression-test runner: play the sample file."""
    play_sound_file(findfile('audiotest.au'))
if __name__ == '__main__':
    test_main()
| [
"damonkohler@gmail.com"
] | damonkohler@gmail.com |
39b54d8b58c400cac04a9cbea46ae5e0a1b4b85f | 204693758329743d0637c53dbf76a9221620fb4e | /src/products/migrations/0001_initial.py | e4d40ef262bb622d02e388a92278ed77f4a07619 | [] | no_license | dipayandutta/djangoimproveo | 122741224795e58f970439e40112871f331258c2 | 20a19bb61eaa88e6134437f582b0f987a71cd899 | refs/heads/master | 2022-11-11T09:53:21.889847 | 2020-06-29T14:59:49 | 2020-06-29T14:59:49 | 273,153,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # Generated by Django 3.0 on 2020-06-18 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the ``Product`` table with a
    # name, a short code, and auto-maintained created/updated timestamps.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('short_code', models.CharField(max_length=20)),
                # auto_now: refreshed on every save; auto_now_add: set once at creation.
                ('updated', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"inbox.dipayan@gmail.com"
] | inbox.dipayan@gmail.com |
a2725503da8b2935c112ff3ac71d8d6368b1be50 | fb9371054b6ce236c6b202cc51c6bc98a3483060 | /python_20190615/Python网络爬虫与信息提取/BaiduStocks/BaiduStocks/spiders/stocks.py | 02363f2ae2e17bf1cd272dc9bcb77a3e83c09fcb | [] | no_license | skymoonfp/python_learning | 621d5e72c5b356fd507e4a00c463ea8d565588fb | 1e8340303809d8c7c3af3201084b158c1784f22e | refs/heads/master | 2020-06-04T10:07:27.009212 | 2019-06-19T13:52:44 | 2019-06-19T13:52:44 | 191,978,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,796 | py | # -*- coding: utf-8 -*-
import random
import re
import scrapy
class StocksSpider(scrapy.Spider):
    """Crawl the Eastmoney stock list and scrape per-stock details from Baidu Gupiao.

    ``parse`` extracts stock codes (sh/sz + 6 digits) from the listing page and
    schedules one detail request per code; ``parse_stock`` turns each detail
    page's <dt>/<dd> pairs into a dict item.
    """
    name = 'stocks'
    allowed_domains = ['baidu.com']
    start_urls = ['http://quote.eastmoney.com/stock_list.html']
    # Pool of desktop User-Agent strings; one is chosen at random per request.
    # BUG FIX: the original list was missing a comma after the first string,
    # so Python silently concatenated the first two UAs into one entry.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    def parse(self, response):
        """Follow every link containing a stock code and request its detail page."""
        for href in response.css('a::attr(href)').extract():
            try:
                # IndexError when the href holds no sh/sz + 6-digit code.
                # (The original bare ``except:`` could also swallow
                # GeneratorExit inside this generator -- keep it narrow.)
                stock = re.findall(r"[s][hz]\d{6}", href)[0]
                url = "https://gupiao.baidu.com/stock/" + stock + ".html"
                ua = random.choice(self.user_agent_list)  # rotate User-Agent
                headers = {
                    'Accept-Encoding': 'gzip, deflate, sdch, br',
                    'Accept-Language': 'zh-CN,zh;q=0.8',
                    'Connection': 'keep-alive',
                    'Referer': 'https://gupiao.baidu.com/',
                    'User-Agent': ua
                }
                yield scrapy.Request(url, callback=self.parse_stock, headers=headers)
            except IndexError:
                continue
    def parse_stock(self, response):
        """Scrape the key/value metrics (and the stock name) from a detail page."""
        infoDict = dict()
        stockInfo = response.css(".stock-bets")
        name = stockInfo.css(".bets-name").extract()[0]
        keyList = stockInfo.css("dt").extract()
        valueList = stockInfo.css("dd").extract()
        for i in range(len(keyList)):
            key = re.findall(r'>.*</dt>', keyList[i])[0][1:-5]
            try:
                val = re.findall(r'\d+\.?.*</dd>', valueList[i])[0][0:-5]
            except IndexError:
                # Empty or missing <dd> cell: record a placeholder.
                val = "--"
            infoDict[key] = val
        # Stock name = ticker text before '(' plus the code between '<' and '>'.
        # Raw strings avoid invalid-escape warnings for \s and \( in patterns.
        infoDict.update(
            {"股票名称": re.findall(r'\s.*\(', name)[0].split()[0] + re.findall(r'\>.*\<', name)[0][1:-1]}
        )
        yield infoDict
| [
"954918@qq.com"
] | 954918@qq.com |
602cd68614e126e6881b023d30d8fa3219b9db34 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/tado/test_config_flow.py | 77656f1c81fd066ab3f22f5e1d614987fc455bd0 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 4,912 | py | """Test the Tado config flow."""
from unittest.mock import MagicMock, patch
import requests
from homeassistant import config_entries, setup
from homeassistant.components.tado.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry
def _get_mock_tado_api(getMe=None):
    """Build a MagicMock standing in for the Tado API client.

    If *getMe* is an exception instance, calling ``getMe()`` on the mock
    raises it; otherwise the call returns *getMe* unchanged (``None`` by
    default).
    """
    api = MagicMock()
    if isinstance(getMe, Exception):
        stub = MagicMock(side_effect=getMe)
    else:
        stub = MagicMock(return_value=getMe)
    # Attach on the type so ``api.getMe`` resolves to the stubbed call.
    type(api).getMe = stub
    return api
async def test_form(hass):
    """Test we can set up through the user path (happy flow)."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # First step must show the credentials form with no errors.
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Mock a Tado account that owns exactly one home named "myhome".
    mock_tado_api = _get_mock_tado_api(getMe={"homes": [{"id": 1, "name": "myhome"}]})
    with patch(
        "homeassistant.components.tado.config_flow.Tado",
        return_value=mock_tado_api,
    ), patch(
        "homeassistant.components.tado.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
        await hass.async_block_till_done()
    # The entry is created, titled after the home, and set up exactly once.
    assert result2["type"] == "create_entry"
    assert result2["title"] == "myhome"
    assert result2["data"] == {
        "username": "test-username",
        "password": "test-password",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
    """Test we handle invalid auth."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Simulate the Tado client raising an HTTPError whose response is 401.
    response_mock = MagicMock()
    type(response_mock).status_code = 401
    mock_tado_api = _get_mock_tado_api(getMe=requests.HTTPError(response=response_mock))
    with patch(
        "homeassistant.components.tado.config_flow.Tado",
        return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    # The flow re-shows the form with an invalid_auth error.
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Simulate a server-side failure: HTTPError with a 500 response.
    response_mock = MagicMock()
    type(response_mock).status_code = 500
    mock_tado_api = _get_mock_tado_api(getMe=requests.HTTPError(response=response_mock))
    with patch(
        "homeassistant.components.tado.config_flow.Tado",
        return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    # The flow re-shows the form with a cannot_connect error.
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}
async def test_no_homes(hass):
    """Test we handle an account that has no homes."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Valid credentials, but the account owns zero homes.
    mock_tado_api = _get_mock_tado_api(getMe={"homes": []})
    with patch(
        "homeassistant.components.tado.config_flow.Tado",
        return_value=mock_tado_api,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {"username": "test-username", "password": "test-password"},
        )
    # The flow re-shows the form with a no_homes error.
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "no_homes"}
async def test_form_homekit(hass):
    """Test that we abort from homekit if tado is already setup."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    # First HomeKit discovery: nothing configured yet, so a form is shown
    # and the flow is keyed on the device id.
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_HOMEKIT},
        data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    flow = next(
        flow
        for flow in hass.config_entries.flow.async_progress()
        if flow["flow_id"] == result["flow_id"]
    )
    assert flow["context"]["unique_id"] == "AA:BB:CC:DD:EE:FF"
    # Second discovery with an existing config entry present must abort.
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_USERNAME: "mock", CONF_PASSWORD: "mock"}
    )
    entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_HOMEKIT},
        data={"properties": {"id": "AA:BB:CC:DD:EE:FF"}},
    )
    assert result["type"] == "abort"
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
7ecd0ab348fff8b275e8175146fd946a9f3c5919 | 3e5150447a2c90c26354500f1df9660ef35c990b | /std_modules/plistlib/.readPlist() | 91b219cacac94b718cb81c036acd8ba166b04078 | [] | no_license | kilirobbs/python-fiddle | 8d6417ebff9d6530e713b6724f8416da86c24c65 | 9c2f320bd2391433288cd4971c2993f1dd5ff464 | refs/heads/master | 2016-09-11T03:56:39.808358 | 2013-03-19T19:26:19 | 2013-03-19T19:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | #!/usr/bin/env python
from os.path import expanduser
from plistlib import readPlist
# Demo of the old plistlib.readPlist API: the returned object supports both
# dict-style ("key" in pl) and attribute-style (pl.Label) access.
# NOTE: Python 2 ``print`` statements below -- this script is not Python 3.
plist="~/Library/LaunchAgents/org.lighttpd.plist"
path=expanduser(plist)
pl = readPlist(path)
print pl.__class__,pl
print pl.keys()
print pl.Label
print "Label" in pl
print "KeepAlive" in pl
# Optional keys: only printed when present in the plist.
if "Program" in pl:
    print pl.Program
if "ProgramArguments" in pl:
    print pl.ProgramArguments
# print pl.programarguments # AttributeError
# print pl.programarguments # AttributeError | [
"cancerhermit@gmail.com"
] | cancerhermit@gmail.com | |
1c816c5f80b1281d92ecec69626cd50125706f1c | cc13092b652113221a877db2bf907c050dc30aaa | /kws_streaming/models/tc_resnet.py | 03c0408ad9a532fc355b5e35dd569b70ed1d7f5e | [
"Apache-2.0"
] | permissive | Th-Fo/google-research | 1e62ee50f76c2931fdb6db1de736a85e94251e25 | 9d7bd968843c27216d01c92ff832b1cd58cafa85 | refs/heads/master | 2020-12-27T17:30:43.916109 | 2020-05-25T17:06:20 | 2020-05-25T17:06:20 | 237,989,659 | 1 | 3 | Apache-2.0 | 2020-05-25T17:06:22 | 2020-02-03T14:52:08 | null | UTF-8 | Python | false | false | 6,116 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model based on combination of n by 1 convolutions with residual blocks."""
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
from kws_streaming.models.utils import parse
def model_parameters(parser_nn):
  """Temporal Convolution Resnet model parameters.
  In more details parameters are described at:
  https://arxiv.org/pdf/1904.03814.pdf
  We converted model to Keras and made it compatible with TF V2
  https://github.com/hyperconnect/TC-ResNet
  Args:
    parser_nn: global command line args parser
  Returns:
    None; parser_nn is updated in place with this model's arguments.
  """
  # Per-block channel counts; the first entry configures the stem convolution.
  parser_nn.add_argument(
      '--channels',
      type=str,
      default='24, 36, 36, 48, 48, 72, 72',
      help='Number of channels per convolutional block (including first conv)',
  )
  # FIX: the help text was inverted relative to model(): a non-zero value
  # selects square [3, 3] kernels; zero selects temporal kernels
  # ([3, 1] for the first conv, [9, 1] for the residual convs).
  parser_nn.add_argument(
      '--debug_2d',
      type=int,
      default=0,
      help='if non-zero conv_kernel will be [3, 3]; if 0 the first conv uses '
      '[3, 1] and residual convs use [9, 1]',
  )
  # Optional average pooling applied right after the stem convolution.
  parser_nn.add_argument(
      '--pool_size',
      type=str,
      default='',
      help="pool size for example '4,4'",
  )
  parser_nn.add_argument(
      '--pool_stride',
      type=int,
      default=0,
      help='pool stride, for example 4',
  )
  # Batch-normalization settings shared by every BN layer in the model.
  parser_nn.add_argument(
      '--bn_momentum',
      type=float,
      default=0.997,
      help='Momentum for the moving average',
  )
  parser_nn.add_argument(
      '--bn_center',
      type=int,
      default=1,
      help='If True, add offset of beta to normalized tensor.'
      'If False, beta is ignored',
  )
  parser_nn.add_argument(
      '--bn_scale',
      type=int,
      default=1,
      help='If True, multiply by gamma. If False, gamma is not used. '
      'When the next layer is linear (also e.g. nn.relu), this can be disabled'
      'since the scaling will be done by the next layer.',
  )
  parser_nn.add_argument(
      '--bn_renorm',
      type=int,
      default=0,
      help='Whether to use Batch Renormalization',
  )
  parser_nn.add_argument(
      '--dropout',
      type=float,
      default=0.2,
      help='Percentage of data dropped',
  )
def model(flags):
  """Temporal Convolution ResNet model.
  It is based on paper:
  Temporal Convolution for Real-time Keyword Spotting on Mobile Devices
  https://arxiv.org/pdf/1904.03814.pdf
  Args:
    flags: data/model parameters
  Returns:
    Keras model for training
  """
  input_audio = tf.keras.layers.Input(
      shape=(flags.desired_samples,), batch_size=flags.batch_size)
  # Raw audio samples -> speech features; result has a time and a feature axis.
  net = speech_features.SpeechFeatures(
      speech_features.SpeechFeatures.get_params(flags))(
          input_audio)
  time_size, feature_size = net.shape[1:3]
  channels = parse(flags.channels)
  net = tf.keras.backend.expand_dims(net)
  if flags.debug_2d:
    conv_kernel = first_conv_kernel = (3, 3)
  else:
    # Move features into the channel axis so all convolutions below are
    # temporal (n x 1 kernels), per the TC-ResNet paper cited above.
    net = tf.reshape(
        net, [-1, time_size, 1, feature_size])  # [batch, time, 1, feature]
    first_conv_kernel = (3, 1)
    conv_kernel = (9, 1)
  # Stem: conv -> BN -> ReLU, using the first entry of `channels`.
  net = tf.keras.layers.Conv2D(
      filters=channels[0],
      kernel_size=first_conv_kernel,
      strides=1,
      padding='same',
      activation='linear')(
          net)
  net = tf.keras.layers.BatchNormalization(
      momentum=flags.bn_momentum,
      center=flags.bn_center,
      scale=flags.bn_scale,
      renorm=flags.bn_renorm)(
          net)
  net = tf.keras.layers.Activation('relu')(net)
  # Optional down-sampling right after the stem.
  if parse(flags.pool_size):
    net = tf.keras.layers.AveragePooling2D(
        pool_size=parse(flags.pool_size), strides=flags.pool_stride)(
            net)
  channels = channels[1:]
  # residual blocks
  for n in channels:
    if n != net.shape[-1]:
      # Channel count changes: down-sample (stride 2) and project the
      # shortcut with a 1x1 conv so shapes match at the Add below.
      stride = 2
      layer_in = tf.keras.layers.Conv2D(
          filters=n,
          kernel_size=1,
          strides=stride,
          padding='same',
          activation='linear')(
              net)
      layer_in = tf.keras.layers.BatchNormalization(
          momentum=flags.bn_momentum,
          center=flags.bn_center,
          scale=flags.bn_scale,
          renorm=flags.bn_renorm)(
              layer_in)
      layer_in = tf.keras.layers.Activation('relu')(layer_in)
    else:
      # Same width: identity shortcut, no down-sampling.
      layer_in = net
      stride = 1
    net = tf.keras.layers.Conv2D(
        filters=n,
        kernel_size=conv_kernel,
        strides=stride,
        padding='same',
        activation='linear')(
            net)
    net = tf.keras.layers.BatchNormalization(
        momentum=flags.bn_momentum,
        center=flags.bn_center,
        scale=flags.bn_scale,
        renorm=flags.bn_renorm)(
            net)
    net = tf.keras.layers.Activation('relu')(net)
    net = tf.keras.layers.Conv2D(
        filters=n,
        kernel_size=conv_kernel,
        strides=1,
        padding='same',
        activation='linear')(
            net)
    net = tf.keras.layers.BatchNormalization(
        momentum=flags.bn_momentum,
        center=flags.bn_center,
        scale=flags.bn_scale,
        renorm=flags.bn_renorm)(
            net)
    # residual connection
    net = tf.keras.layers.Add()([net, layer_in])
    net = tf.keras.layers.Activation('relu')(net)
  # Global average pooling over the remaining time/feature grid.
  net = tf.keras.layers.AveragePooling2D(
      pool_size=net.shape[1:3], strides=1)(
          net)
  net = tf.keras.layers.Dropout(rate=flags.dropout)(net)
  # fully connected layer (implemented as a 1x1 convolution)
  net = tf.keras.layers.Conv2D(
      filters=flags.label_count,
      kernel_size=1,
      strides=1,
      padding='same',
      activation='linear')(
          net)
  # Flatten to (batch, label_count) logits.
  net = tf.reshape(net, shape=(-1, net.shape[3]))
  return tf.keras.Model(input_audio, net)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
03e25ec68fa24203aeca5c8b73e78b1da4d5005b | d4517268b8724ef208e9d07e59628208cb0832e9 | /BalancedSmileys/py2/bs.py | ae9c513dbea5334261132bb501da379f9a70fc60 | [] | no_license | StevenDunn/CodeEval | 5a8a47d3a90d9bed350228f6647e41f1298f46c2 | b81bb31a600826d4b3b242a9a35aa1d28c2eb484 | refs/heads/master | 2021-01-15T15:33:53.155975 | 2018-04-01T13:54:16 | 2018-04-01T13:54:16 | 11,549,566 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # balanced smileys soln in py2 for code eval by steven a dunn
import sys
def is_balanced(line):
    """Return "YES" if *line* has some balanced reading, else "NO".

    The sequences ':)' and ':(' are ambiguous: each may be read either as a
    smiley (ignored) or as its closing/opening parenthesis.  The check tracks
    the interval [min_open, max_open] of possible open-paren counts while
    scanning left to right; characters other than parens/smileys are ignored
    (so a trailing newline is harmless).
    """
    if line == "":
        return "YES"
    # Collapse the two-character smileys into single placeholder characters
    # so the scan below can work one character at a time.
    line = line.replace(":)", "}").replace(":(", "{")
    min_open = 0  # fewest parens that could be open at this point
    max_open = 0  # most parens that could be open at this point
    for c in line:
        if c == '(':
            min_open += 1
            max_open += 1
        elif c == ')':
            # A hard ')' must close something in every interpretation.
            max_open -= 1
            if max_open < 0:
                return "NO"
            # BUG FIX: the lower bound cannot drop below zero -- an open
            # count is never negative.  Without this clamp, ":)(" was
            # wrongly accepted because an earlier optional close "paid for"
            # a later unmatched '('.
            min_open = max(min_open - 1, 0)
        elif c == '{':
            # ':(' may be an opening paren or just a smiley.
            max_open += 1
        elif c == '}':
            # ':)' may close a paren or be just a smiley; clamp at zero.
            min_open = max(min_open - 1, 0)
    # Balanced iff zero open parens is achievable at the end.
    return "YES" if min_open == 0 else "NO"
# CLI driver (Python 2 ``print`` statement): check every line of the input
# file named on the command line.  Each line keeps its trailing newline,
# which is ignored by is_balanced's character scan.
f = open(sys.argv[1], 'r')
for line in f:
    print is_balanced(line)
f.close()
"stevendunn@gmail.com"
] | stevendunn@gmail.com |
d2fb1ee1dc5bf60a4b7ed477b3f0d3dd9283e44f | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=66/params.py | d68d8385fdd6a0806cc34b1f280849c3602ecd11 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.527214',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 66,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
be8e93fb9885fd8b2e99c287d74219d031f769ab | 95d144459ff7cbfc009c20ec16f3c06ddda867cc | /archimedes/archimedesquestion/urls.py | 676fcfc9f625eacd64a157dd402ef044404dc22f | [] | no_license | cvelas11/mmobious | b9c75fde17b038ac6bab8c2d698d588e27f8d8d6 | 605a9425dda1ccb9978c998eff679d5ddc7c0db8 | refs/heads/master | 2021-07-09T06:01:59.712677 | 2021-04-05T15:26:30 | 2021-04-05T15:26:30 | 236,180,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | """archimedes URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from django.conf.urls import include
from archimedesquestion import views
from django.views.generic import TemplateView
# Namespace for reversing, e.g. ``{% url 'archimedesquestion:register' %}``.
app_name= 'archimedesquestion'
# NOTE(review): ``django.conf.urls.url`` takes regex patterns (all anchored
# with '^' here) and is deprecated in newer Django in favour of
# ``re_path``/``path``.
urlpatterns = [
    url("^register", views.register, name = 'register'),
    url("^ingresar", views.ingresar, name = 'ingresar'),
    url("^logout", views.logout_func, name = 'logout'),
    url("^practicar", views.practicar, name = 'practicar'),
    url("^recursos", views.recursos, name = 'recursos'),
    url("^questions", views.questions, name = 'questions'),
    url("^iniciar", views.iniciar, name = 'iniciar'),
    url("^proyecto", views.proyecto, name = 'proyecto'),
    url("^dashboard", views.dashboard, name = 'dashboard'),
    # path('practicar', TemplateView.as_view(template_name='archimedesquestion/practicar.html')), # <--
    #path('accounts/', include('allauth.urls'))
]
| [
"(none)"
] | (none) |
cf3fcd0da0ce278ca386e594acdeb7ccd3d457e6 | d24a6e0be809ae3af8bc8daa6dacfc1789d38a84 | /ABC/ABC151-200/ABC196/B.py | 76ae65459e878d6624bf37e1e3c5fb61dc875458 | [] | no_license | k-harada/AtCoder | 5d8004ce41c5fc6ad6ef90480ef847eaddeea179 | 02b0a6c92a05c6858b87cb22623ce877c1039f8f | refs/heads/master | 2023-08-21T18:55:53.644331 | 2023-08-05T14:21:25 | 2023-08-05T14:21:25 | 184,904,794 | 9 | 0 | null | 2023-05-22T16:29:18 | 2019-05-04T14:24:18 | Python | UTF-8 | Python | false | false | 390 | py | def solve(xs):
if "." in xs:
return int(xs[:xs.index(".")])
else:
return int(xs)
def main():
    """Read one decimal string from stdin and print its integer part."""
    xs = input()
    res = solve(xs)
    print(res)
def test():
    """Self-checks with known answers."""
    assert solve("5.90") == 5
    assert solve("0") == 0
    assert solve("84939825309432908832902189.9092309409809091329") == 84939825309432908832902189
if __name__ == "__main__":
    # Run the self-checks first, then process the real input.
    test()
    main()
| [
"cashfeg@gmail.com"
] | cashfeg@gmail.com |
88387b3005840626d21e96362ebb5b34eee260d1 | 5ef5abb189907b010e544e3c42ce1a38ed8e298f | /mysite/baseItems/models.py | aa8e62fe30b17d1bf2c62081b129658066edc4bb | [] | no_license | LKingJ23/Gymkana-formacion-Django | 5a5daf27abdd100ee8f9e0e61af5eb010899928a | 63d49b7ed81e8a3d0c5ed9c9765801680fb51ef7 | refs/heads/master | 2021-04-15T19:03:33.830028 | 2018-04-09T16:29:59 | 2018-04-09T16:29:59 | 126,459,951 | 0 | 0 | null | 2018-04-09T16:30:00 | 2018-03-23T09:05:57 | Python | UTF-8 | Python | false | false | 1,023 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
import os
class BaseItems(models.Model):
    """Abstract base for content items: a title, a subtitle and a body text."""
    title = models.CharField(max_length=200, blank=False, null=False)
    subtitle = models.CharField(max_length=200, blank=False, null=False)
    body = models.TextField()
    class Meta:
        abstract = True  # no table of its own; subclasses inherit the fields
    def __str__(self):
        return self.title
class New(BaseItems):
    """A news item: adds a publication date and an image to BaseItems."""
    publish_date = models.DateField(auto_now_add=True)  # set once at creation
    image = models.ImageField(upload_to="img", default='/img/periodico.jpg')
class Event(BaseItems):
    """An event with an explicit start and end date."""
    start_date = models.DateField(blank=False, null=False)
    end_date = models.DateField(blank=False, null=False)
@receiver(post_delete, sender=New)
def delete_img(sender, **kwargs):
    """Remove a deleted New item's image file from disk, keeping the default.

    Connected to the post_delete signal for New.
    """
    try:
        image = kwargs.get('instance').image
        # NOTE(review): uploaded files are typically stored under a relative
        # name ("img/..."), while this compares against an absolute-looking
        # default path -- confirm the guard ever matches the default image.
        if image.name != "/img/periodico.jpg":
            os.remove(image.path)
    except OSError:
        # Best effort: a missing file during cleanup is not an error.
        pass
| [
"lkingj23@gmail.com"
] | lkingj23@gmail.com |
e61ef3bc74137cb2c14a765724f0967c90ad8dfc | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/apimanagement/v20210801/get_policy.py | f723b56f81345494660930c00c7d052abe191863 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,750 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetPolicyResult',
'AwaitableGetPolicyResult',
'get_policy',
'get_policy_output',
]
@pulumi.output_type
class GetPolicyResult:
    """
    Policy Contract details.
    """
    def __init__(__self__, format=None, id=None, name=None, type=None, value=None):
        # Defensive runtime type checks: these values are populated from the
        # provider invoke result (see get_policy), so a mismatch indicates a
        # provider/SDK inconsistency rather than a user error.
        if format and not isinstance(format, str):
            raise TypeError("Expected argument 'format' to be a str")
        pulumi.set(__self__, "format", format)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)
    # Read-only accessors: each one passes through to the value captured above.
    @property
    @pulumi.getter
    def format(self) -> Optional[str]:
        """
        Format of the policyContent.
        """
        return pulumi.get(self, "format")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Contents of the Policy as defined by the format.
        """
        return pulumi.get(self, "value")
class AwaitableGetPolicyResult(GetPolicyResult):
    """Awaitable wrapper so a ``get_policy`` result can also be ``await``-ed."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator, which
        # makes the object awaitable; awaiting completes immediately with a
        # plain GetPolicyResult copy of the already-resolved values.
        if False:
            yield self
        return GetPolicyResult(
            format=self.format,
            id=self.id,
            name=self.name,
            type=self.type,
            value=self.value)
def get_policy(format: Optional[str] = None,
               policy_id: Optional[str] = None,
               resource_group_name: Optional[str] = None,
               service_name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPolicyResult:
    """
    Policy Contract details.
    :param str format: Policy Export Format.
    :param str policy_id: The identifier of the Policy.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Map Python argument names onto the provider's camelCase invoke inputs.
    invoke_args = {
        'format': format,
        'policyId': policy_id,
        'resourceGroupName': resource_group_name,
        'serviceName': service_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-native:apimanagement/v20210801:getPolicy', invoke_args, opts=opts, typ=GetPolicyResult).value
    return AwaitableGetPolicyResult(
        format=raw.format,
        id=raw.id,
        name=raw.name,
        type=raw.type,
        value=raw.value)
@_utilities.lift_output_func(get_policy)
def get_policy_output(format: Optional[pulumi.Input[Optional[str]]] = None,
                      policy_id: Optional[pulumi.Input[str]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      service_name: Optional[pulumi.Input[str]] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPolicyResult]:
    """
    Policy Contract details.
    :param str format: Policy Export Format.
    :param str policy_id: The identifier of the Policy.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Body intentionally empty: lift_output_func wraps get_policy so the same
    # lookup also accepts pulumi Output inputs and returns an Output result.
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
152ea5d19b0c59f0c85e729ac604a8d44fb1fa72 | 105ef2d5f8bba13c15deb8c4a2a9af307b4e547a | /Programmers/level2_python/42860.py | ba611eaf3ea88a982dd35ae7396d3d3a6cd9df6f | [] | no_license | caniro/algo-note | 1ec4c0e08adcb542d3356daf7b6e943af722394f | d237a5b58a67ca453dc7a1a335f99428af2c5df5 | refs/heads/master | 2023-08-29T22:39:35.189711 | 2021-11-04T11:18:07 | 2021-11-04T11:18:07 | 260,473,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,096 | py | # 조이스틱 : https://programmers.co.kr/learn/courses/30/lessons/42860
def solution(name):
def go_next_idx():
nonlocal answer, current_idx
diff_min = 100
next_idx = 0
for i in range(length):
if name[i] == current_string[i]:
continue
diff = min(abs(i - current_idx), length - abs(i - current_idx))
if diff_min > diff:
diff_min = diff
next_idx = i
current_idx = next_idx
answer += diff_min
def change_char():
nonlocal answer, current_string
to_change = name[current_idx]
current_string[current_idx] = to_change
cost = min(abs(ord(to_change) - ord('A')),\
(ord('Z') - ord('A') + 1) - abs(ord(to_change) - ord('A')))
answer += cost
answer = 0
length = len(name)
current_string = ['A' for _ in range(length)]
current_idx = 0
while True:
if ''.join(current_string) == name:
break
go_next_idx()
change_char()
return answer
| [
"caniro@naver.com"
] | caniro@naver.com |
9ef188c317fcaecca1e9dba557f4b68ad439a924 | ccc04ec2fe54772797682642aa17665fff848575 | /supplier_rfq/migrations.py | 0d6c919e6d1fdc1257d774773dbf55fb6eacf74a | [
"MIT"
] | permissive | admin627863/supplier_rfq | dcf33b8db0af1a8ef7dd470c338631a8a4d4c230 | 81434ea527c724e95ed2724d212fd9f548afb81a | refs/heads/main | 2023-08-10T11:37:11.793389 | 2021-09-24T05:32:25 | 2021-09-24T05:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,755 | py | import frappe
from frappe.modules.import_file import import_file_by_path
from frappe.utils import get_bench_path
import os
from os.path import join
def after_migrate(**args):
    """Frappe ``after_migrate`` hook: (re)create this app's custom fields
    and import the property-setter fixture if it is missing."""
    supplier_rfq_create_custom_fields(**args)
    make_property_setter()
def make_property_setter():
    """Import the RFQ title-field Property Setter fixture when it is absent."""
    records_dir = '/apps/supplier_rfq/supplier_rfq/import_records'
    if not frappe.db.exists('Property Setter', 'Request for Quotation-main-title_field'):
        # NOTE(review): records_dir already starts with '/', so the joined
        # path contains a double slash, exactly as the previous code built it;
        # harmless on POSIX.
        import_folder_path = '{}/{}'.format(get_bench_path(), records_dir)
        make_records(import_folder_path, 'property_setter.json')
def make_records(path, fname):
    """Import fixture file *fname* from directory *path*, if *path* exists.

    Silently does nothing when the directory is missing.
    """
    if not os.path.isdir(path):
        return
    import_file_by_path('{}/{}'.format(path, fname))
def supplier_rfq_create_custom_fields(**args):
    """Create the Supplier-RFQ custom fields on the Request for Quotation,
    Supplier Quotation Item and Supplier Quotation doctypes.

    All field definitions share the same large set of default flags, so they
    are built through a small helper and only the per-field deviations are
    spelled out.
    """
    from frappe.custom.doctype.custom_field.custom_field import create_custom_fields

    def _field(dt, fieldname, fieldtype, insert_after, label, **overrides):
        # One Custom Field definition; everything not overridden keeps the
        # shared defaults (all flags 0, name = "<dt>-<fieldname>").
        definition = dict(
            allow_in_quick_entry=0,
            allow_on_submit=0,
            bold=0,
            collapsible=0,
            columns=0,
            docstatus=0,
            doctype='Custom Field',
            dt=dt,
            fetch_if_empty=0,
            fieldname=fieldname,
            fieldtype=fieldtype,
            hidden=0,
            ignore_user_permissions=0,
            ignore_xss_filter=0,
            in_global_search=0,
            in_list_view=0,
            in_standard_filter=0,
            insert_after=insert_after,
            label=label,
            length=0,
            name='{0}-{1}'.format(dt, fieldname),
            no_copy=0,
            permlevel=0,
            print_hide=0,
            print_hide_if_no_value=0,
            read_only=0,
            report_hide=0,
            reqd=0,
            search_index=0,
            translatable=0,
            unique=0,
        )
        definition.update(overrides)
        return definition

    custom_fields = {
        'Request for Quotation': [
            _field('Request for Quotation', 'project_cf', 'Link',
                   'vendor', 'Project', options='Project'),
            _field('Request for Quotation', 'supplier_comparison_section',
                   'Section Break', 'items', 'Supplier Comparison'),
            _field('Request for Quotation', 'supplier_quotation_comparisons',
                   'Table', 'supplier_comparison_section', '',
                   allow_on_submit=1,
                   options='Supplier Quotation Comparison CT'),
        ],
        'Supplier Quotation Item': [
            _field('Supplier Quotation Item', 'schedule_date', 'Date',
                   'item_name', 'Required Date', in_list_view=1),
        ],
        'Supplier Quotation': [
            _field('Supplier Quotation', 'supplier_uploaded_attachment_cf',
                   'Attach', 'supplier_name', 'Supplier Uploaded Attachment'),
            _field('Supplier Quotation', 'supplier_comparison',
                   'Section Break', 'amended_from', 'Supplier Comparison'),
            _field('Supplier Quotation', 'supplier_quotation_comparisons',
                   'Table', 'supplier_comparison', '',
                   allow_on_submit=1,
                   options='Supplier Quotation Comparison CT',
                   read_only=1),
            _field('Supplier Quotation', 'supplier_notes', 'Text',
                   'terms', 'Supplier Notes'),
        ],
    }
    create_custom_fields(custom_fields)
    frappe.db.commit()  # to avoid implicit-commit errors
| [
"mr.ashish.shah@gmail.com"
] | mr.ashish.shah@gmail.com |
f1c8081f521eee2c2e999506d85961e520a8ba5b | d50685a3f3d612349b1f6627ed8b807f0eec3095 | /demo/framework/ControlNode/aa.py | 94eb965303f8c805e95a0d88a91a316126e60660 | [] | no_license | Erich6917/python_littlespider | b312c5d018bce17d1c45769e59243c9490e46c63 | 062206f0858e797945ce50fb019a1dad200cccb4 | refs/heads/master | 2023-02-12T23:22:27.352262 | 2021-01-05T06:21:20 | 2021-01-05T06:21:20 | 113,631,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # -*- coding: utf-8 -*-
# @Time : 2018/2/2
# @Author : LIYUAN134
# @File : aa.py
# @Commment:
#
urls = []
urls = None
if urls:
print 'loops'
else:
print 'empty'
| [
"1065120559@qq.com"
] | 1065120559@qq.com |
f90ff3052a6142fae24866037d0d30b94299f3da | 41147a2cdc221c2a8db84852ef7f29175b3409b3 | /caso/messenger/ssm.py | 36b1e4af9b37a37e62b77128d5d83278a5275f21 | [
"Apache-2.0"
] | permissive | aidaph/caso | ee1437d189c20044fe45fedeb4326a6f5fccd775 | e40e883c2eb729f040ef63085c94b48b0c7661e3 | refs/heads/master | 2023-08-01T13:52:34.556841 | 2023-06-07T08:11:52 | 2023-06-07T09:52:08 | 140,431,401 | 0 | 0 | Apache-2.0 | 2018-07-10T12:50:11 | 2018-07-10T12:50:09 | null | UTF-8 | Python | false | false | 6,487 | py | # -*- coding: utf-8 -*-
# Copyright 2014 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Module containing the APEL SSM Messenger."""
import json
import typing
import warnings
# We are not parsing XML so this is safe
import xml.etree.ElementTree as ETree # nosec
import dirq.QueueSimple
from oslo_config import cfg
from oslo_log import log
import caso.exception
import caso.messenger
import caso.record
from caso import utils
LOG = log.getLogger(__name__)
opts = [
cfg.StrOpt(
"output_path",
default="/var/spool/apel/outgoing/openstack",
help="Directory to put the generated SSM records.",
),
cfg.IntOpt(
"max_size", default=100, help="Maximum number of records to send per message"
),
]
CONF = cfg.CONF
CONF.register_opts(opts, group="ssm")
__all__ = ["SSMMessenger", "SSMMessengerV04"]
class SSMMessenger(caso.messenger.BaseMessenger):
"""SSM Messenger that pushes formatted messages to a dirq instance."""
version_cloud = "0.4"
version_ip = "0.2"
version_accelerator = "0.1"
version_storage = None # FIXME: this cannot have a none version
def __init__(self):
"""Initialize the SSM messenger with configured values."""
try:
utils.makedirs(CONF.ssm.output_path)
except Exception as err:
LOG.error(f"Failed to create path {CONF.ssm.output_path} because {err}")
raise err
self.queue = dirq.QueueSimple.QueueSimple(CONF.ssm.output_path)
def _push_message_cloud(self, entries: typing.List[str]):
"""Push a compute message, formatted following the CloudRecord."""
message = f"APEL-cloud-message: v{self.version_cloud}\n"
aux = "\n%%\n".join(entries)
message += f"{aux}\n"
self.queue.add(message.encode("utf-8"))
def _push_message_json(
self,
entries: typing.List[str],
msg_type: str,
version: str,
):
"""Push a JSON message with a UsageRecords list."""
message = {
"Type": msg_type,
"Version": version,
"UsageRecords": [json.loads(r) for r in entries],
}
self.queue.add(json.dumps(message))
def _push_message_ip(self, entries: typing.List[str]):
"""Push an IP message."""
self._push_message_json(entries, "APEL Public IP message", self.version_ip)
def _push_message_accelerator(self, entries: typing.List[str]):
"""Push an accelerator message."""
self._push_message_json(
entries, "APEL-accelerator-message", self.version_accelerator
)
def _push_message_storage(self, entries):
"""Push a storage message."""
ETree.register_namespace(
"sr", "http://eu-emi.eu/namespaces/2011/02/storagerecord"
)
root = ETree.Element("sr:StorageUsageRecords")
for record in entries:
# We are not parsing XML so this is safe
sr = ETree.fromstring(record) # nosec
root.append(sr)
self.queue.add(ETree.tostring(root))
def _push(self, entries_cloud, entries_ip, entries_accelerator, entries_storage):
"""Push all messages, dividing them into smaller chunks.
This method gets lists of messages to be pushed in smaller chucks as per GGUS
ticket 143436: https://ggus.eu/index.php?mode=ticket_info&ticket_id=143436
"""
for i in range(0, len(entries_cloud), CONF.ssm.max_size):
entries = entries_cloud[i : i + CONF.ssm.max_size] # noqa(E203)
self._push_message_cloud(entries)
for i in range(0, len(entries_ip), CONF.ssm.max_size):
entries = entries_ip[i : i + CONF.ssm.max_size] # noqa(E203)
self._push_message_ip(entries)
for i in range(0, len(entries_accelerator), CONF.ssm.max_size):
entries = entries_accelerator[i : i + CONF.ssm.max_size] # noqa(E203)
self._push_message_accelerator(entries)
for i in range(0, len(entries_storage), CONF.ssm.max_size):
entries = entries_storage[i : i + CONF.ssm.max_size] # noqa(E203)
self._push_message_storage(entries)
def push(self, records):
"""Push all records to SSM.
This includes pushing the following records:
- Cloud records
- IP records
- Accelerator records
- Storage records
This method will iterate over all the records, transforming them into the
correct messages, then pushing it.
"""
if not records:
return
entries_cloud = []
entries_ip = []
entries_accelerator = []
entries_storage = []
for record in records:
if isinstance(record, caso.record.CloudRecord):
entries_cloud.append(record.ssm_message())
elif isinstance(record, caso.record.IPRecord):
entries_ip.append(record.ssm_message())
elif isinstance(record, caso.record.AcceleratorRecord):
entries_accelerator.append(record.ssm_message())
elif isinstance(record, caso.record.StorageRecord):
entries_storage.append(record.ssm_message())
else:
raise caso.exception.CasoError("Unexpected record format!")
self._push(entries_cloud, entries_ip, entries_accelerator, entries_storage)
class SSMMessengerV04(SSMMessenger):
"""Deprecated versioned SSM Messenger."""
def __init__(self):
"""Initialize the SSM V04 messenger.
Deprecated not to be used, please stop using SSM versioned messengers.
"""
msg = (
"Using an versioned SSM messenger is deprecated, please use "
"'ssm' as messenger instead in order to use the latest "
"version."
)
warnings.warn(msg, DeprecationWarning)
super(SSMMessengerV04, self).__init__()
| [
"aloga@ifca.unican.es"
] | aloga@ifca.unican.es |
7ff358f10e084e30bb2c22cfd323b7f85f03fce4 | 390d19c3159133d8c688396cb11b4ed3f8178d09 | /Programmers/Score_Kit/01_3_위장.py | 0591e5aaa95ab645a8f56734b027b4f3db4deddb | [] | no_license | JJayeee/CodingPractice | adba64cbd1d030b13a877f0b2e5ccc1269cb2e11 | 60f8dce48c04850b9b265a9a31f49eb6d9fc13c8 | refs/heads/master | 2021-08-16T17:14:01.161390 | 2021-07-16T00:42:18 | 2021-07-16T00:42:18 | 226,757,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py |
def solution(clothes):
    """Count the distinct ways to wear at least one clothing item.

    Each category with ``n`` items contributes ``n + 1`` choices (one of its
    items, or skipping the category); subtract 1 to exclude the "wear
    nothing" combination.

    :param clothes: list of ``[item_name, category]`` pairs
    :return: number of valid outfit combinations (0 for empty input)
    """
    counts = {}
    for cloth in clothes:
        # cloth is [item_name, category]; only the category matters.
        counts[cloth[1]] = counts.get(cloth[1], 0) + 1
    answer = 1
    # Fix: the original iterated .items() but never used the key;
    # iterate values only.
    for count in counts.values():
        answer *= count + 1
    return answer - 1
# Example from the problem statement:
# 2 headgear x 1 eyewear -> (2 + 1) * (1 + 1) - 1 = 5
clothes = [['yellow_hat', 'headgear'], ['blue_sunglasses', 'eyewear'], ['green_turban', 'headgear']]
print(solution(clothes))
| [
"jay.hyundong@gmail.com"
] | jay.hyundong@gmail.com |
d23def31f939fd7f0eb5ff5d4fde51ddad4a27cb | 6fb37fee016346120d4c14c4343516532304055a | /src/genie/libs/parser/iosxe/tests/test_show_crypto.py | f1e2ff61c5c3ad22aae41719fa89eb3f6e49188a | [
"Apache-2.0"
] | permissive | devbollinger/genieparser | 011526ebbd747c6dcd767535ce4bd33167e15536 | ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c | refs/heads/master | 2020-12-20T11:36:00.750128 | 2020-01-24T18:45:40 | 2020-01-24T18:45:40 | 236,061,155 | 0 | 0 | Apache-2.0 | 2020-01-24T18:38:43 | 2020-01-24T18:38:42 | null | UTF-8 | Python | false | false | 6,649 | py |
# Python
import unittest
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Genie
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxe.show_crypto import ShowCryptoPkiCertificates
# ====================================================
# Unit test for 'show crypto pki certificates <WORD>'
# ====================================================
class test_show_crypto_pki_certificate(unittest.TestCase):
    """Unit tests for the ShowCryptoPkiCertificates parser.

    Each test feeds canned ``show crypto pki certificates <trustpoint>``
    CLI output (via a mocked device) into the parser and compares the
    result against the expected parsed dictionary.
    """

    device = Device(name='aDevice')
    # Empty device output must make the parser raise SchemaEmptyParserError.
    empty_output = {'execute.return_value': ''}

    # Expected parsed structure for a Catalyst 3850 SUDI trustpoint
    # (device certificate plus its CA certificate).
    golden_parsed_output_c3850 = {
        "trustpoints": {
            "CISCO_IDEVID_SUDI": {
                "associated_trustpoints":{
                    "certificate": {
                        "status": "Available",
                        "serial_number_in_hex": "793B572700000003750B",
                        "subject": {
                            "name": "WS-C3850-24P-0057D21BC800",
                            "pid": "WS-C3850-24P",
                            "cn": "WS-C3850-24P-0057D21BC800",
                            "serial_number": "FCW1947C0GF"
                        },
                        "issuer": {
                            "cn": "Cisco Manufacturing CA SHA2",
                            "o": "Cisco"
                        },
                        "crl_distribution_points": "http://www.cisco.com/security/pki/crl/cmca2.crl",
                        "usage": "General Purpose",
                        "validity_date": {
                            "start_date": "00:34:52 UTC Nov 20 2015",
                            "end_date": "00:44:52 UTC Nov 20 2025"
                        }
                    },
                    "ca_certificate": {
                        "status": "Available",
                        "serial_number_in_hex": "02",
                        "subject": {
                            "cn": "Cisco Manufacturing CA SHA2",
                            "o": "Cisco"
                        },
                        "issuer": {
                            "cn": "Cisco Root CA M2",
                            "o": "Cisco"
                        },
                        "crl_distribution_points": "http://www.cisco.com/security/pki/crl/crcam2.crl",
                        "usage": "Signature",
                        "validity_date": {
                            "start_date": "13:50:58 UTC Nov 12 2012",
                            "end_date": "13:00:17 UTC Nov 12 2037"
                        }
                    }
                }
            }
        }
    }

    # Raw CLI output corresponding to golden_parsed_output_c3850.
    golden_output_c3850 = {'execute.return_value': '''
    Certificate
      Status: Available
      Certificate Serial Number (hex): 793B572700000003750B
      Certificate Usage: General Purpose
      Issuer:
        cn=Cisco Manufacturing CA SHA2
        o=Cisco
      Subject:
        Name: WS-C3850-24P-0057D21BC800
        Serial Number: PID:WS-C3850-24P SN:FCW1947C0GF
        cn=WS-C3850-24P-0057D21BC800
        serialNumber=PID:WS-C3850-24P SN:FCW1947C0GF
      CRL Distribution Points:
        http://www.cisco.com/security/pki/crl/cmca2.crl
      Validity Date:
        start date: 00:34:52 UTC Nov 20 2015
        end date: 00:44:52 UTC Nov 20 2025
      Associated Trustpoints: CISCO_IDEVID_SUDI

    CA Certificate
      Status: Available
      Certificate Serial Number (hex): 02
      Certificate Usage: Signature
      Issuer:
        cn=Cisco Root CA M2
        o=Cisco
      Subject:
        cn=Cisco Manufacturing CA SHA2
        o=Cisco
      CRL Distribution Points:
        http://www.cisco.com/security/pki/crl/crcam2.crl
      Validity Date:
        start date: 13:50:58 UTC Nov 12 2012
        end date: 13:00:17 UTC Nov 12 2037
      Associated Trustpoints: CISCO_IDEVID_SUDI Trustpool
    '''}

    # Expected parsed structure for a CSR1000v self-signed certificate.
    golden_parsed_output_csr1000 = {
        'trustpoints':
            {'TP-self-signed-4146203551':
                {'associated_trustpoints':
                    {'router_self_signed_certificate':
                        {'issuer':
                            {'cn': 'IOS-Self-Signed-Certificate-4146203551'},
                         'serial_number_in_hex': '01',
                         'status': 'Available',
                         'storage': 'nvram:IOS-Self-Sig#1.cer',
                         'subject':
                            {'cn': 'IOS-Self-Signed-Certificate-4146203551',
                             'name': 'IOS-Self-Signed-Certificate-4146203551'},
                         'usage': 'General Purpose',
                         'validity_date':
                            {'end_date': '00:00:00 UTC Jan 1 2020',
                             'start_date': '21:37:27 UTC Apr 23 2018'}}}}}}

    # Raw CLI output corresponding to golden_parsed_output_csr1000.
    golden_output_csr1000 = {'execute.return_value': '''
    Router Self-Signed Certificate
      Status: Available
      Certificate Serial Number (hex): 01
      Certificate Usage: General Purpose
      Issuer:
        cn=IOS-Self-Signed-Certificate-4146203551
      Subject:
        Name: IOS-Self-Signed-Certificate-4146203551
        cn=IOS-Self-Signed-Certificate-4146203551
      Validity Date:
        start date: 21:37:27 UTC Apr 23 2018
        end date: 00:00:00 UTC Jan 1 2020
      Associated Trustpoints: TP-self-signed-4146203551
      Storage: nvram:IOS-Self-Sig#1.cer
    '''}

    def test_empty(self):
        # Empty output must raise the schema-empty error.
        self.device = Mock(**self.empty_output)
        obj = ShowCryptoPkiCertificates(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_c3850(self):
        # Catalyst 3850 SUDI trustpoint output.
        self.maxDiff = None
        self.device = Mock(**self.golden_output_c3850)
        obj = ShowCryptoPkiCertificates(device=self.device)
        parsed_output = obj.parse(trustpoint_name='CISCO_IDEVID_SUDI')
        self.assertEqual(parsed_output, self.golden_parsed_output_c3850)

    def test_csr1000(self):
        # CSR1000v self-signed certificate output.
        self.maxDiff = None
        self.device = Mock(**self.golden_output_csr1000)
        obj = ShowCryptoPkiCertificates(device=self.device)
        parsed_output = obj.parse(trustpoint_name='TP-self-signed-4146203551')
        self.assertEqual(parsed_output, self.golden_parsed_output_csr1000)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"jeaubin@cisco.com"
] | jeaubin@cisco.com |
a634a215e16936e0746a4d323b85a847e4296c61 | f7d343efc7b48818cac4cf9b98423b77345a0067 | /deployment_validation/csv_to_yaml_convertor/csv_to_yaml_convertor.py | ccc797444b3541460f1b9eb12eabc0c558df00f4 | [] | no_license | vijaymaddukuri/python_repo | 70e0e24d0554c9fac50c5bdd85da3e15c6f64e65 | 93dd6d14ae4b0856aa7c6f059904cc1f13800e5f | refs/heads/master | 2023-06-06T02:55:10.393125 | 2021-06-25T16:41:52 | 2021-06-25T16:41:52 | 151,547,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,684 | py | from os.path import dirname, abspath, join
from robot.api import logger
from shutil import copyfile
import csv
import re
import sys
import yaml
current_dir = dirname(dirname(abspath(__file__)))
class CsvToYamlConvertor:
    """
    Take the CSV input and convert it into yaml format.

    For every data row of the CSV (the first row is treated as headings),
    the base YAML file is copied into ``dir_to_store_yaml`` and each cell is
    written into the copy under the key derived from its column heading
    (dotted headings become nested keys).
    """
    def __init__(self, service, base_yaml_file_path, csv_file_path, dir_to_store_yaml, filename=None):
        """
        :param service: TAS or Middlewware or worker or deployment
        :param base_yaml_file_path: Base YAML file name along with the location
        :param csv_file_path: CSV file name with the location
        :param dir_to_store_yaml: directory where generated YAML files are written
        :param filename: optional fixed basename for the generated YAML file;
            when None the first CSV column (tenant id) and service name are used
        """
        self.yaml_file_path = base_yaml_file_path
        self.dir_to_store_yaml = dir_to_store_yaml
        self.file_name = filename

        # Open our data file in read-mode. NOTE(review): the handle stays
        # open until convert_csv_to_yaml() runs to completion and closes it.
        self.csvfile = open(csv_file_path, 'r')

        # Save a CSV Reader object.
        self.datareader = csv.reader(self.csvfile, delimiter=',', quotechar='"')

        # Service name
        self.service = service

        # Empty array for data headings, which we will fill with the first row from our CSV.
        # NOTE(review): convert_csv_to_yaml() actually uses a local
        # `data_headings`; this attribute is never updated after __init__.
        self.data_headings = []

    def load_yaml_file(self, filename):
        """
        load YAML file
        In case of any error, this function calls sys.exit(1)
        :param filename: YAML filename along with the location
        :return: YAML as dict
        """
        try:
            with open(filename, 'r') as stream:
                try:
                    return yaml.safe_load(stream)
                except yaml.YAMLError as exc:
                    logger.error(exc)
                    sys.exit(1)
        except IOError as e:
            logger.error(e)
            sys.exit(1)

    def update_yaml_data(self, myYaml, key, value, append_mode=False):
        """
        Set or add a key to given YAML data. Call itself recursively.
        :param myYaml: YAML data to be modified
        :param key: key as array of key tokens
        :param value: value of any data type
        :param append_mode: default is False; when True an existing scalar
            value is turned into a list and the new value appended
        :return: modified YAML data (also modified in place)
        """
        if len(key) == 1:
            if not append_mode or not key[0] in myYaml:
                myYaml[key[0]] = value
            else:
                # Promote an existing scalar to a list before appending.
                if type(myYaml[key[0]]) is not list:
                    myYaml[key[0]] = [myYaml[key[0]]]
                myYaml[key[0]].append(value)
        else:
            # Intermediate key: ensure a dict exists, then recurse one level.
            if not key[0] in myYaml or type(myYaml[key[0]]) is not dict:
                myYaml[key[0]] = {}
            myYaml[key[0]] = self.update_yaml_data(myYaml[key[0]], key[1:], value, append_mode)
        return myYaml

    def rm_yaml_data(self, myYaml, key):
        """
        Remove a key and it's value from given YAML data structure.
        No error or such thrown if the key doesn't exist.
        :param myYaml: YAML data to be modified
        :param key: key as array of key tokens
        :return: modified YAML data
        """
        if len(key) == 1 and key[0] in myYaml:
            del myYaml[key[0]]
        elif key[0] in myYaml:
            myYaml[key[0]] = self.rm_yaml_data(myYaml[key[0]], key[1:])
        return myYaml

    def save_yaml(self, data, yaml_file):
        """
        Saves given YAML data to file and upload yaml file to remote machine
        :param data: YAML data
        :param yaml_file: Location to save the yaml file
        """
        try:
            with open(yaml_file, 'w') as outfile:
                yaml.dump(data, outfile, default_flow_style=False)
        except IOError as e:
            logger.error(e)
            sys.exit(1)

    def convert_csv_to_yaml(self):
        """
        Update the yaml file and save it

        For each data row: copy the base YAML, write every cell into it
        (keyed by the column heading), then post-process the saved file so
        that numeric string keys ('0':, '1': ...) become YAML list items.
        """
        # Loop through each row...
        for row_index, row in enumerate(self.datareader):
            # If this is the first row, populate our data_headings variable.
            if row_index == 0:
                data_headings = row
            # Otherwise, create a YAML file from the data in this row...
            else:
                # Create a new config.yaml with filename based on index number (Tenant ID) of our current row
                # and service
                if self.file_name is None:
                    file_name = str(row[0]) + '_' + self.service.lower() + '_config' + '.yaml'
                else:
                    file_name = self.file_name + '_' + 'config' + '.yaml'
                updated_yaml_file_path = join(self.dir_to_store_yaml, file_name)
                copyfile(self.yaml_file_path, updated_yaml_file_path)
                readyamldata = self.load_yaml_file(updated_yaml_file_path)
                # Empty string that we will fill with YAML formatted text based on data extracted from our CSV.
                yaml_text = ""
                # Loop through each cell in this row...
                for cell_index, cell in enumerate(row):
                    # Compile a line of YAML text from our headings list and the text of the current cell,
                    # followed by a linebreak.
                    # Heading text is converted to lowercase. Spaces are converted to underscores and hyphens
                    # are removed.
                    # In the cell text, line endings are replaced with commas.
                    cell_heading = data_headings[cell_index].replace(" ", "_").replace("-", "")
                    # Create the list of keys
                    cell_items = cell_heading.split('.')
                    if len(cell_items) == 1:
                        cell_keys = [cell_heading]
                    else:
                        cell_keys = cell_items
                    # Get the cell value
                    cell_value = cell.replace("\n", ", ")
                    # Update the data in yaml format
                    set_value = self.update_yaml_data(readyamldata, cell_keys, cell_value)
                    # Save the yaml data into a file
                    # (re-saved once per cell; the final write wins)
                    self.save_yaml(set_value, updated_yaml_file_path)
                # Open the above yaml file to update the list formatted data
                f = open(updated_yaml_file_path, 'r')
                f = f.read()
                # Convert the data into list format using regex:
                # turn single-digit quoted keys ('0': ...) into '- ' list items.
                final = (re.sub(r'(\'[0-9]\'\:\s+)', '- ', str(f)))
                # Save the file
                with open(updated_yaml_file_path, 'w') as f:
                    f.write(final)
        # Close the CSV
        self.csvfile.close()
# Sample Execution
# yamlObj = CsvToYamlConvertor('tas', r'C:\Users\config.yaml', r'C:\Users\Downloads\inputfile.csv')
# yamlObj.convert_csv_to_yaml()
| [
"Vijay.Maddukuri@virtustream.com"
] | Vijay.Maddukuri@virtustream.com |
1d877ae0dcc1e80b1288dcf6fc5c06f53a8b53c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03326/s143256207.py | 2b339363954b6a8306957c1299b9177c0449d16f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | n, m = [int(i) for i in input().split()]
xyz = [[int(i) for i in input().split()] for _ in range(n)]
ans = 0
a = [0]*3
for a[0] in range(-1, 2, 2):
for a[1] in range(-1, 2, 2):
for a[2] in range(-1, 2, 2):
d = list(map(lambda x: sum([i * j for i, j in zip(a, x)]), xyz))
d.sort(reverse=1)
ans = max(ans, sum(d[:m]))
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
41e6e4d4a28bedaf4a97e2cec0c2c861189a34ea | 57d8323ca9bdd0965d487fe2e453a3cfb8dfa86f | /src/train_parabola.py | 1bbe280db5eed7ac6840269310212c152769c849 | [
"Unlicense"
] | permissive | mountain/suan-demo | 20ac79ddaf8b749c21badda37d07a3aeccdf7ba7 | 5136ae050156a2538aea4f718735995d3a289457 | refs/heads/master | 2023-04-07T10:19:49.148272 | 2021-04-09T05:24:20 | 2021-04-09T05:24:20 | 288,619,647 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,677 | py | import os
import arrow
import logging
import argparse
import numpy as np
import torch
import torch as th
import torch.nn as nn
from pathlib import Path
from leibniz.unet.base import UNet
from leibniz.unet.complex_hyperbolic import CmplxHyperBottleneck
from leibniz.unet.hyperbolic import HyperBottleneck
from leibniz.unet.senet import SEBottleneck
from leibniz.nn.activation import CappingRelu
from blks.direct import DirectBlocks
from blks.am import AMBlocks
from dataset.chaos_parabola import ChaosParabolaDataSet
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=str, default='0', help="index of gpu")
parser.add_argument("-c", "--n_cpu", type=int, default=64, help="number of cpu threads to use during batch generation")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="size of the batches")
parser.add_argument("-e", "--epoch", type=int, default=0, help="current epoch to start training from")
parser.add_argument("-n", "--n_epochs", type=int, default=500, help="number of epochs of training")
parser.add_argument("-m", "--model", type=str, default='', help="metrological model to load")
parser.add_argument("-k", "--check", type=str, default='', help="checkpoint file to load")
opt = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu
print('cudnn:', th.backends.cudnn.version())
np.core.arrayprint._line_width = 150
np.set_printoptions(linewidth=np.inf)
name = opt.model
time_str = arrow.now().format('YYYYMMDD_HHmmss')
model_path = Path(f'./_log-{time_str}')
model_path.mkdir(exist_ok=True)
log_file = model_path / Path('train.log')
logging.basicConfig(level=logging.INFO, filename=log_file, filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.info(str(opt))
root = './data'
if not os.path.exists(root):
os.mkdir(root)
train_set = ChaosParabolaDataSet(length=800)
test_set = ChaosParabolaDataSet(length=200)
batch_size = opt.batch_size
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_set,
batch_size=batch_size,
shuffle=False)
mean_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=batch_size,
shuffle=True)
# First pass over the training data: global mean of the concatenated
# (input, target) tensor, used to normalise the network inputs/outputs.
total_sum = 0.0
total_cnt = 0.0
for step, sample in enumerate(mean_loader):
    input, target = sample
    input, target = input.float(), target.float()
    data = th.cat((input, target), dim=1)
    total_sum += data.sum().item()
    total_cnt += np.prod(data.size())

mean = total_sum / total_cnt
print(mean)
std_loader = torch.utils.data.DataLoader(
    dataset=train_set,
    batch_size=batch_size,
    shuffle=True)

# Second pass: accumulate squared deviations from the global mean.
total_std = 0.0
total_cnt = 0.0
# Fix: iterate std_loader (it was created but the loop reused mean_loader).
for step, sample in enumerate(std_loader):
    input, target = sample
    input, target = input.float(), target.float()
    data = th.cat((input, target), dim=1)
    total_std += ((data - mean) * (data - mean)).sum().item()
    total_cnt += np.prod(data.size())

# Fix: the accumulated quantity is the *variance*; take the square root so
# `std` really is the standard deviation used by LearningModel to normalise.
std = (total_std / total_cnt) ** 0.5
print(std)
class LearningModel(nn.Module):
    # Learned model: a leibniz UNet predicting 10 future steps from a
    # 2-channel input; data is standardised with the module-level mean/std
    # computed above and mapped back to data units on the way out.
    def __init__(self):
        super().__init__()
        # 2 input channels -> 10 output channels on a 32x32 grid, 5 levels
        # with arithmetic-mean blocks (AMBlocks).
        # NOTE(review): `ratio=0`, `scales=[-1,...]`, `factors=[1,...]`
        # follow the leibniz UNet API -- confirm against the installed
        # leibniz version.
        self.unet = UNet(2, 10, normalizor='batch', spatial=(32, 32), layers=5, ratio=0,
                         vblks=[2, 2, 2, 2, 2], hblks=[2, 2, 2, 2, 2],
                         scales=[-1, -1, -1, -1, -1], factors=[1, 1, 1, 1, 1],
                         block=AMBlocks, relu=CappingRelu(), final_normalized=True)

    def forward(self, input):
        # Standardise, predict, then de-standardise (mean/std are globals).
        input = (input - mean) / std
        output = self.unet(input)
        output = output * std + mean
        return output
class PerfectModel(nn.Module):
    """Closed-form reference model.

    Iterates the chaotic map ``z -> 1 - 2*z**2`` ten times starting from the
    second input channel and returns the ten successive states, concatenated
    along the channel dimension.
    """

    def __init__(self):
        super().__init__()
        # Placeholder parameter-less buffer so the module is well-formed.
        self.dummy = th.zeros(1)

    def forward(self, input):
        z = input[:, 1:2]
        states = []
        for _ in range(10):
            z = 1 - 2 * z * z
            states.append(z)
        return th.cat(states, dim=1)
# Model under training, closed-form baseline, loss and optimiser.
mdl = LearningModel()
pfc = PerfectModel()
mse = nn.MSELoss()
optimizer = th.optim.Adam(mdl.parameters())
def train(epoch):
    """Run one optimisation pass of `mdl` over the training loader."""
    seen = 0
    running_loss = 0.0
    mdl.train()
    for step, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.float()
        labels = labels.float()
        if th.cuda.is_available():
            inputs, labels = inputs.cuda(), labels.cuda()
            mdl.cuda()
        prediction = mdl(inputs)
        loss = mse(prediction, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch = prediction.size(0)
        logger.info(f'Epoch: {epoch + 1:03d} | Step: {step + 1:03d} | Loss: {loss.item()}')
        running_loss += loss.item() * batch
        seen += batch
    logger.info(f'Epoch: {epoch + 1:03d} | Train Loss: {running_loss / seen}')
def test(epoch):
    """Evaluate the learned model on the held-out loader (no gradients)."""
    mdl.eval()
    seen = 0
    running_loss = 0.0
    for step, (inputs, labels) in enumerate(test_loader):
        inputs = inputs.float()
        labels = labels.float()
        if th.cuda.is_available():
            inputs, labels = inputs.cuda(), labels.cuda()
            mdl.cuda()
        with th.no_grad():
            prediction = mdl(inputs)
            loss = mse(prediction, labels)
        batch = prediction.size(0)
        logger.info(f'Epoch: {epoch + 1:03d} | Step: {step + 1:03d} | Loss: {loss.item()}')
        running_loss += loss.item() * batch
        seen += batch
    logger.info(f'Epoch: {epoch + 1:03d} | Test Loss: {running_loss / seen}')
def baseline(epoch):
    """Score the closed-form reference model `pfc` on the test loader."""
    seen = 0
    running_loss = 0.0
    for step, (inputs, labels) in enumerate(test_loader):
        inputs = inputs.float()
        labels = labels.float()
        if th.cuda.is_available():
            inputs, labels = inputs.cuda(), labels.cuda()
            pfc.cuda()
        with th.no_grad():
            result = pfc(inputs)
            loss = mse(result, labels)
        batch = result.size(0)
        logger.info(f'Epoch: {epoch + 1:03d} | Step: {step + 1:03d} | Loss: {loss.item()}')
        running_loss += loss.item() * batch
        seen += batch
    logger.info(f'Epoch: {epoch + 1:03d} | Baseline: {running_loss / seen}')
if __name__ == '__main__':
    # Train/evaluate/baseline once per epoch; abort the whole run on the
    # first unhandled error (logged with traceback).
    for epoch in range(opt.n_epochs):
        try:
            train(epoch)
            test(epoch)
            baseline(epoch)
        except Exception as e:
            logger.exception(e)
            break
| [
"mingli.yuan@gmail.com"
] | mingli.yuan@gmail.com |
ad5f1a7e30e5cb32c1168b8a2e1ced91d1f31fa2 | afa9fcd0f2443515ba89e96ed4eb9416e9d11847 | /python/Gaffer/OpMatcher.py | bc89c2c9e05284e5cf226f31a38735f92aa6aa92 | [
"BSD-3-Clause",
"IJG"
] | permissive | dneg/gaffer | 6eb12b3ab3cde00afdf170c456969a38f5968237 | e87cb50f55a048cd7f6d5dcdfe6f95e38db2c5b6 | refs/heads/master | 2021-01-16T18:13:33.456876 | 2013-09-24T17:23:58 | 2013-09-24T17:23:58 | 13,094,917 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,396 | py | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import threading
import traceback
import weakref
import IECore
import Gaffer
## The OpMatcher class provides a means of searching for Ops suitable to
# act upon a given input value.
#
# The following Op userData entries are supported :
#
# ["OpMatcher"]["ignore"] - when this BoolData is True, the Op is not
# considered by the matcher.
#
# The following Parameter usedData entries are supported :
#
# ["OpMatcher"]["ignore"] - when this BoolData is True, the Parameter is not
# considered by the matcher.
class OpMatcher() :
	pass
class _OpMatcherImpl :
	pass
# NOTE: implementation below
class OpMatcher() :

	def __init__( self, classLoader, classNamesMatchString = "*", reportErrors=True ) :
		"""Builds an index of all loadable ops, keeping for each op the
		parameters that are candidates for receiving a matched value."""

		# filled with tuples of the form ( opClass, parameters ), where
		# parameters is a list of ( parameter, parameterPath ) pairs.
		self.__ops = []

		for className in classLoader.classNames( classNamesMatchString ) :

			try :
				opClass = classLoader.load( className )
				opInstance = opClass()
			except Exception :
				if reportErrors :
					IECore.msg( IECore.Msg.Level.Error, "Gaffer.OpMatcher", "Error loading op \"%s\" : %s" % ( className, traceback.format_exc() ) )
				continue

			if self.__isIgnored( opInstance ) :
				continue

			parameters = []
			self.__findParameters( opInstance.parameters(), parameters )
			if len( parameters ) :
				self.__ops.append( ( opClass, parameters ) )

	## Returns a list of ( op, parameter ) tuples. Each op will be an Op instance
	# where the corresponding parameter has already been set with parameterValue.
	def matches( self, parameterValue ) :

		# Convert the incoming value into one or more candidate IECore values.
		processedValues = []
		if isinstance( parameterValue, ( Gaffer.FileSystemPath, Gaffer.SequencePath ) ) :
			# we might be able to match a single file
			processedValues.append( IECore.StringData( str( parameterValue ) ) )
			# or provide a single file input to an op which accepts multiple files
			processedValues.append( IECore.StringVectorData( [ str( parameterValue ) ] ) )
		elif isinstance( parameterValue, list ) :
			processedValue = IECore.StringVectorData()
			for value in parameterValue :
				assert( isinstance( value, ( Gaffer.FileSystemPath, Gaffer.SequencePath ) ) )
				processedValue.append( str( value ) )
			# Bug fix : previously this value was assigned to a dead local and
			# never appended, so list inputs could never match anything.
			processedValues.append( processedValue )
		elif isinstance( parameterValue, IECore.Object ) :
			# Bug fix : as above, the value was previously dropped.
			processedValues.append( parameterValue )

		if not processedValues :
			return []

		result = []
		for opClass, parameters in self.__ops :
			for testParameter, parameterPath in parameters :
				for processedValue in processedValues :
					if testParameter.valueValid( processedValue )[0] :
						# instantiate a fresh op and navigate down to the
						# matching parameter before setting the value on it.
						op = opClass()
						parameter = op.parameters()
						for name in parameterPath :
							parameter = parameter[name]
						parameter.setValue( processedValue )
						result.append( ( op, parameter ) )

		return result

	__defaultInstances = weakref.WeakKeyDictionary()
	__defaultInstancesMutex = threading.Lock()

	## Returns an OpMatcher suitable for sharing by everyone - initialising one
	# takes considerable time so it's preferable to reuse one if one has been created
	# for the classLoader in question already. If classLoader is not specified then
	# it defaults to IECore.ClassLoader.defaultOpLoader().
	@classmethod
	def defaultInstance( cls, classLoader=None ) :

		if classLoader is None :
			classLoader = IECore.ClassLoader.defaultOpLoader()

		with cls.__defaultInstancesMutex :
			result = cls.__defaultInstances.get( classLoader, None )
			if result is None :
				result = OpMatcher( classLoader )
				cls.__defaultInstances[classLoader] = result
			return result

	## Returns True if the given parameterised object or parameter has been
	# tagged to be ignored by the matcher via its userData. The second lookup
	# deliberately overrides the first (legacy "UI" location) when both exist.
	@staticmethod
	def __isIgnored( parameterisedOrParameter ) :

		ignore = False
		with IECore.IgnoredExceptions( KeyError ) :
			# backwards compatibility with something proprietary
			ignore = parameterisedOrParameter.userData()["UI"]["OpMatcher"]["ignore"].value
		with IECore.IgnoredExceptions( KeyError ) :
			ignore = parameterisedOrParameter.userData()["OpMatcher"]["ignore"].value

		return ignore

	# Recursively collects ( parameter, parameterPath ) pairs for every
	# path-typed parameter that requires its value to exist on disk.
	def __findParameters( self, parameter, result, path = None ) :

		if path is None :
			path = []

		for child in parameter.values() :

			if self.__isIgnored( child ) :
				continue

			childPath = path + [ child.name ]
			if isinstance( child, IECore.CompoundParameter ) :
				self.__findParameters( child, result, childPath )
			elif isinstance( child, ( IECore.PathParameter, IECore.PathVectorParameter ) ) :
				if child.mustExist :
					result.append( ( child, childPath ) )
| [
"thehaddonyoof@gmail.com"
] | thehaddonyoof@gmail.com |
6d82aec23779880869c6b12dcd1a18d3c756863f | 4f111dfacab0acc93900e7746538f85e0b3d8d78 | /day10/07普通装饰器.py | f04378d67d7cac40b5ae5f3509f4fccb5581cfef | [] | no_license | ljxproject/basecode | 5541f25cfe90d5fad26eac0b6e72802aa1fad1f4 | 485e4b41593839bfc61e67261247fb88dc80cc1d | refs/heads/master | 2020-03-26T16:16:26.422617 | 2018-08-17T08:05:11 | 2018-08-17T08:05:11 | 145,091,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py |
def check():
print("模拟的扩展功能")
def outter(func):
def inner(name):
check()
func(name)
return inner
# 在函数的上方加上@符号,@符号加上一个装饰器名,表示将该名字对应的装饰器应用在该函数上
# 本质: 相当于执行了 func1 = outter(func1) 跟函数名同名的一个装饰后的函数名 = 装饰器名(函数名)
@outter
def func1(name):
print("基础功能1")
print("名字是:%s"%(name))
# def func2():
# print("基础功能2")
func1("金三胖胖")
| [
"403496369@qq.com"
] | 403496369@qq.com |
f8d32401ff16e6d846bfc328c7542055956a6336 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-network/generated_samples/virtual_network_create_subnet_with_address_prefixes.py | b010b5096c523a684115794891bc8db241fd15c0 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,904 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python virtual_network_create_subnet_with_address_prefixes.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create (or update) a VNet with a subnet using two address prefixes.

    Mirrors the VirtualNetworkCreateSubnetWithAddressPrefixes API example.
    """
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    # Request body for the VNet: one subnet carrying two address prefixes.
    vnet_parameters = {
        "location": "eastus",
        "properties": {
            "addressSpace": {"addressPrefixes": ["10.0.0.0/16"]},
            "subnets": [{"name": "test-2", "properties": {"addressPrefixes": ["10.0.0.0/28", "10.0.1.0/28"]}}],
        },
    }

    # begin_create_or_update returns a poller; .result() blocks until done.
    poller = client_begin_create_or_update(network_client, vnet_parameters)
    print(poller)


def client_begin_create_or_update(network_client, vnet_parameters):
    # Helper kept private to this sample: issues the long-running operation
    # and waits for its final resource.
    return network_client.virtual_networks.begin_create_or_update(
        resource_group_name="rg1",
        virtual_network_name="test-vnet",
        parameters=vnet_parameters,
    ).result()
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2023-04-01/examples/VirtualNetworkCreateSubnetWithAddressPrefixes.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
466265e6d44645df2b813ff931b7d2359c095200 | 641347d14ddf44263a5e9c93ecf28263640b179e | /string_based_problems/longest_common_prefix/solution.py | 419c06399dcb511e59196881dcefd2ac8537ec2b | [] | no_license | Ranjit007ai/InterviewBit-String | 5ee97a13f8ab04d458ac148da800d18cfe5b8579 | c8f89caa6f57adf91920a066707ddffe814eea9e | refs/heads/main | 2023-03-29T00:18:08.914419 | 2021-03-27T14:31:49 | 2021-03-27T14:31:49 | 352,092,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | # Given a array of strings ,we need to find the longest common prefix in them.
# this function take the input array as a parameter and return the longest common prefix .
def longest_common_prefix(input_array):
    """Return the longest common prefix shared by every string in input_array.

    Fixes over the original:
    - empty input returned '' instead of crashing (min_index stayed -1 and
      input_array[-1] / the magic sentinel 999999999999999 were relied on);
    - the manual minimum-length scan is replaced by min(..., key=len).
    """
    if not input_array:
        return ''
    # The common prefix can never be longer than the shortest string.
    shortest = min(input_array, key=len)
    prefix = ''
    for position, character in enumerate(shortest):
        # Stop at the first column where any word disagrees.
        if all(word[position] == character for word in input_array):
            prefix += character
        else:
            break
    return prefix
# test case
input_array = ['abcdef','abcdf']
answer = longest_common_prefix(input_array)
print(answer)
| [
"noreply@github.com"
] | Ranjit007ai.noreply@github.com |
ce0fcf78cceaca4c2e0ca7774665e851f2ca73e1 | df7b40e95718ac0f6071a0ba571b42efc81cf6de | /tests/test_models/test_heads/test_nl_head.py | 6f4bede5e7f377b68aecf731d23634a8a5a04e69 | [
"Apache-2.0"
] | permissive | shinianzhihou/ChangeDetection | 87fa2c498248e6124aeefb8f0ee8154bda36deee | 354e71234bef38b6e142b6ba02f23db958582844 | refs/heads/master | 2023-01-23T20:42:31.017006 | 2023-01-09T11:37:24 | 2023-01-09T11:37:24 | 218,001,748 | 162 | 29 | Apache-2.0 | 2022-11-03T04:11:00 | 2019-10-28T08:41:54 | Python | UTF-8 | Python | false | false | 446 | py | import torch
from mmseg.models.decode_heads import NLHead
from .utils import to_cuda
def test_nl_head():
    # Exercise the NLHead forward pass and verify the output geometry.
    head = NLHead(in_channels=32, channels=16, num_classes=19)
    assert len(head.convs) == 2
    assert hasattr(head, 'nl_block')

    feature_maps = [torch.randn(1, 32, 45, 45)]
    if torch.cuda.is_available():
        head, feature_maps = to_cuda(head, feature_maps)

    logits = head(feature_maps)
    assert logits.shape == (1, head.num_classes, 45, 45)
| [
"1178396201@qq.com"
] | 1178396201@qq.com |
21d3d280c5d2176baa2730b07b6b4d3790cfc623 | 80bb9bb3c9c811a64e7916f5a76ac75acfde1548 | /Material para n2/N2 ESTUDO/Listas/somaLista.py | 419ed0413cb52dee63f283aa647178c87600cdd4 | [] | no_license | RafaelSanzio0/FACULDADE-PYTHON.2 | 83e75b7bbe42e78a1eeb03b8b80afda00d95bacf | ea4f306f1e7c068a24f03eab7231e41bb466d2a1 | refs/heads/master | 2020-03-27T06:55:58.326782 | 2018-11-24T04:28:47 | 2018-11-24T04:28:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | '''Escreva uma função não recursiva que receba como parâmetro uma lista de inteiros e
retorne como resposta a soma de todos os elementos da lista'''
lista = [10,20,30]
def soma_elementos(lista):
    """Sum every element of *lista* and return the total.

    The exercise statement asks for the sum to be *returned*; the original
    only printed it and implicitly returned None, so the caller's
    ``soma = soma_elementos(lista)`` always got None.  The print is kept so
    the observable console output is unchanged.
    """
    soma = sum(int(elemento) for elemento in lista)
    print("A soma dos elementos é", soma)
    return soma
soma = soma_elementos(lista)
| [
"rafaelsanzio16@gmail.com"
] | rafaelsanzio16@gmail.com |
dc5f56487684026050bb1ee73a2dba2e41624b0a | e1c9db908a9cefe46e293c7dcb1b6008e2e46951 | /synthetic.py | bdf3bfbead77b26faf256228be5bed41acac58e2 | [
"MIT"
] | permissive | luckyyangrun/gfnn | dbcfeca1910f6333474c6756b076dcac8601a2f3 | 36667861caacba921469d43917d002896e832c3f | refs/heads/master | 2023-03-16T20:10:15.025568 | 2020-08-06T10:32:52 | 2020-08-06T10:32:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,535 | py | import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import sgc_precompute, set_seed, stack_feat, load_donuts
from models import get_model
from metrics import accuracy
import pickle as pkl
from args import get_syn_args
from time import perf_counter
from noise import zero_idx, gaussian
from train import train_regression, test_regression,\
train_gcn, test_gcn,\
train_kgcn, test_kgcn
# Arguments
args = get_syn_args()
# setting random seeds
set_seed(args.seed, args.cuda)
# Generate the synthetic two-donuts dataset plus its graph, splits, and an
# optional evaluation mesh (mesh_pack is unused below — kept for plotting?
# TODO confirm).
adj, features, labels, idx_train,\
    idx_val, idx_test, mesh_pack = load_donuts(args.gen_num_samples,
                                               args.gen_noise,
                                               args.gen_factor,
                                               args.gen_test_size,
                                               args.gen_num_neigh,
                                               args.normalization,
                                               args.cuda,
                                               args.invlap_alpha,
                                               args.gen_mesh,
                                               args.gen_mesh_step)
### NOISE TO FEATURES ONLY USE ZERO HERE
# Noise is applied on the CPU as numpy arrays, then converted back to tensors.
if args.noise != "None":
    features = features.numpy()
if args.noise == "gaussian":
    features = gaussian(features,
                        mean=args.gaussian_opt[0],
                        std=args.gaussian_opt[1])
if args.noise == "zero_test":
    # Zero out the feature rows of the test indices only.
    idx_test = idx_test.numpy()
    features = zero_idx(features, idx_test)
    idx_test = torch.LongTensor(idx_test)
    if args.cuda:
        idx_test = idx_test.cuda()
if args.noise != "None":
    features = torch.FloatTensor(features).float()
    if args.cuda:
        features = features.cuda()
### END NOISE TO FEATURES
# Monkey patch for Stacked Logistic Regression
# SLG consumes features stacked over `degree` propagation steps, so its input
# dimension is degree times larger.
if args.model == "SLG":
    nfeat = features.size(1) * args.degree
else:
    nfeat = features.size(1)
model = get_model(model_opt=args.model,
                  nfeat=nfeat,
                  nclass=labels.max().item()+1,
                  nhid=args.hidden,
                  dropout=args.dropout,
                  cuda=args.cuda,
                  degree=args.degree)
# --- SGC / gfNN: precompute propagated features, then train a simple head ---
if args.model == "SGC" or args.model == "gfnn":
    features, precompute_time = sgc_precompute(features, adj, args.degree)
    print("{:.4f}s".format(precompute_time))
    model, acc_val, train_time = train_regression(model,
                                                  features[idx_train],
                                                  labels[idx_train],
                                                  features[idx_val],
                                                  labels[idx_val],
                                                  args.epochs,
                                                  args.weight_decay,
                                                  args.lr,
                                                  args.dropout)
    acc_test = test_regression(model, features[idx_test], labels[idx_test])
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,\
        acc_test))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
# --- SLG: stack features over propagation steps, then logistic regression ---
if args.model == "SLG":
    features, precompute_time = stack_feat(features, adj, args.degree)
    features = torch.FloatTensor(features).float()
    if args.cuda:
        features = features.cuda()
    print("{:.4f}s".format(precompute_time))
    model, acc_val, train_time = train_regression(model,
                                                  features[idx_train],
                                                  labels[idx_train],
                                                  features[idx_val],
                                                  labels[idx_val],
                                                  args.epochs,
                                                  args.weight_decay,
                                                  args.lr,
                                                  args.dropout)
    acc_test = test_regression(model, features[idx_test], labels[idx_test])
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,\
        acc_test))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
# --- GCN: end-to-end training, no precompute stage (reported as 0) ---
if args.model == "GCN":
    model, acc_val, train_time = train_gcn(model,
                                           adj,
                                           features,
                                           labels,
                                           idx_train,
                                           idx_val,
                                           args.epochs,
                                           args.weight_decay,
                                           args.lr,
                                           args.dropout)
    acc_test = test_gcn(model, adj, features, labels, idx_test)
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,\
        acc_test))
    precompute_time = 0
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
# --- KGCN: same protocol as GCN with the KGCN variant ---
if args.model == "KGCN":
    model, acc_val, train_time = train_kgcn(model,
                                            adj,
                                            features,
                                            labels,
                                            idx_train,
                                            idx_val,
                                            args.epochs,
                                            args.weight_decay,
                                            args.lr,
                                            args.dropout)
    acc_test = test_kgcn(model, adj, features, labels, idx_test)
    precompute_time = 0
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,\
        acc_test))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
| [
"hoangnt.titech@gmail.com"
] | hoangnt.titech@gmail.com |
f20790bffb57e8b31c4439e153e75454285669f6 | 9f78c2bfadd1e87d779a786e7cd0952b6fbc96f1 | /jobs/tasks/pay/index.py | 4ad10e9160a8d058533bfc8a0dc68c69ef51f8a4 | [] | no_license | Erick-LONG/order | 08393ed9b315cf2c6af5e2b9bfd6917605fe8d94 | 4b853403c9c949b3ecbe2766ec77750557cf11fc | refs/heads/master | 2022-11-11T09:32:53.570524 | 2020-06-30T09:20:18 | 2020-06-30T09:20:18 | 262,786,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import datetime
from common.models.pay.PayOrder import PayOrder
from application import app,db
from common.libs.pay.PayService import PayService
'''
python manager.py runjob -m pay/index
'''
class JobTask():
    """Scheduled job that closes unpaid orders older than 30 minutes.

    Invoked via: python manager.py runjob -m pay/index
    """

    def __init__(self):
        pass

    def run(self):
        now = datetime.datetime.now()
        # Orders created more than 30 minutes ago are considered expired.
        date_before_30min = now + datetime.timedelta(minutes=-30)
        # status == -8 appears to mark orders awaiting payment — assumption
        # from usage; confirm against PayOrder's status constants.
        # (Renamed local: the original called this `list`, shadowing the builtin.)
        pending_orders = PayOrder.query.filter_by(status=-8)\
            .filter(PayOrder.created_time <= date_before_30min.strftime('%Y-%m-%d %H:%M:%S')).all()
        if not pending_orders:
            return
        pay_target = PayService()
        for item in pending_orders:
            pay_target.closeOrder(pay_order_id=item.id)
| [
"834424581@qq.com"
] | 834424581@qq.com |
cb8f947de7142b0c8636844086b34bb65bf2e752 | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /프로그래머스/레벨1/K번째수.py | 1b25a50c2ba4db15fe16d9ea88c5b10318662d36 | [] | no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,750 | py | """배열 array의 i번째 숫자부터 j번째 숫자까지 자르고 정렬했을 때, k번째에 있는 수를 구하려 합니다.
예를 들어 array가 [1, 5, 2, 6, 3, 7, 4], i = 2, j = 5, k = 3이라면
array의 2번째부터 5번째까지 자르면 [5, 2, 6, 3]입니다.
1에서 나온 배열을 정렬하면 [2, 3, 5, 6]입니다.
2에서 나온 배열의 3번째 숫자는 5입니다.
배열 array, [i, j, k]를 원소로 가진 2차원 배열 commands가 매개변수로 주어질 때, commands의 모든
원소에 대해 앞서 설명한 연산을 적용했을 때 나온 결과를 배열에 담아 return 하도록 solution 함수를 작성해주세요."""
array = [1, 5, 2, 6, 3, 7, 4]
commands = [[2, 5, 3], [4, 4, 1], [1, 7, 3]]
# 예상 답 : [5, 6, 3]
def solutioned(array, commands):
    """Debug-instrumented version of solution(); prints every step.

    Bug fix: the original fired its slice logic when (j % 2) == 0, which
    includes j == 0 — at that point div_arr holds a single element and
    div_arr[1] raised IndexError on the very first command.  A command
    triple [i, j, k] is only complete after its third element (j == 2).
    """
    answer = []
    div_arr = []
    for i in range(len(commands)):
        for j in range(len(commands[i])):
            div_arr.append(commands[i][j])
            print(f'index : {i}, {j} => {commands[i][j]}')
            if j == 2:
                # div_arr now holds the full [i, j, k] command (1-based).
                lst = array[div_arr[0] - 1:div_arr[1]]
                print(lst)
                lst.sort()
                answer.append(lst[div_arr[2] - 1])
                print(f'sorted:{lst}')
                print(div_arr)
                div_arr.clear()
    print(answer)
    return answer
# solutioned(array,commands)
def solution(array, commands):
    """For each command [i, j, k], sort array[i..j] (1-based, inclusive)
    and take the k-th element of the sorted slice.

    Rewritten: the original's manual accumulator fired on (j % 2) == 0,
    which indexed div_arr[1] while div_arr held a single element and so
    raised IndexError on the first element of every command.  Slicing plus
    sorted() expresses the task directly and leaves *array* untouched.
    """
    return [sorted(array[i - 1:j])[k - 1] for i, j, k in commands]
solutioned(array, commands)
| [
"lsb530@naver.com"
] | lsb530@naver.com |
818880e7d059aa3fec801d8a9246894b9e8abd74 | 58f81a20e6a22d17af626d423c6e1a5b160f784c | /services/core-api/app/api/securities/namespace.py | 8793e106eea95aca7cc765859cc08cff227fb56b | [
"Apache-2.0"
] | permissive | cryptobuks1/mds | 5e115c641dfa2d1a91097d49de9eeba1890f2b34 | 6e3f7006aeb5a93f061717e90846b2b0d620d616 | refs/heads/master | 2022-04-23T21:11:37.124243 | 2020-04-14T17:55:39 | 2020-04-14T17:55:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | from flask_restplus import Namespace
from app.api.securities.resources.bond import BondListResource, BondResource
from app.api.securities.resources.bond_status import BondStatusResource
from app.api.securities.resources.bond_type import BondTypeResource
from app.api.securities.resources.bond_document import BondDocumentListResource
from app.api.securities.resources.reclamation_invoice import ReclamationInvoiceListResource, ReclamationInvoiceResource
from app.api.securities.resources.reclamation_invoice_document import ReclamationInvoiceDocumentListResource
# REST namespace wiring: maps each resource class to its URL route under
# the `securities` namespace.
api = Namespace('securities', description='Securities operations')

# Bonds
api.add_resource(BondListResource, '/bonds')
api.add_resource(BondResource, '/bonds/<bond_guid>')
api.add_resource(BondStatusResource, '/bonds/status-codes')
api.add_resource(BondTypeResource, '/bonds/type-codes')
api.add_resource(BondDocumentListResource, '/<string:mine_guid>/bonds/documents')

# Reclamation Invoices
api.add_resource(ReclamationInvoiceListResource, '/reclamation-invoices')
api.add_resource(ReclamationInvoiceResource, '/reclamation-invoices/<reclamation_invoice_guid>')
api.add_resource(ReclamationInvoiceDocumentListResource,
                 '/<string:mine_guid>/reclamation-invoices/documents')
| [
"bcgov-csnr-cd@gov.bc.ca"
] | bcgov-csnr-cd@gov.bc.ca |
b071005e1bfea85be1a41e5bb422cd485b68feca | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/gui/native/mac/macfonts.py | 4366c8b020a76f6279dfea111cce5545dc170c0d | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 871 | py | '''
some Mac code from the internet
// Path to resource files in Mac bundle
wxString m_resourceDir;
// container of locally activated font
ATSFontContainerRef m_fontContainer;
FSSpec spec;
wxMacFilename2FSSpec(m_resourceDir + _T("Bank Gothic Light.ttf"),
&spec);
OSStatus status = ATSFontActivateFromFileSpecification(&spec,
kATSFontContextLocal, kATSFontFormatUnspecified, NULL,
kATSOptionFlagsDefault, &m_fontContainer);
wxASSERT_MSG(status == noErr, _T("font activation failed"));
and then anywhere in the app this works fine:
wxFont(9, wxFONTFAMILY_DEFAULT, wxFONTSTYLE_NORMAL |
wxFONTFLAG_ANTIALIASED, wxFONTWEIGHT_NORMAL, false, _T("Bank Gothic
Light"));
'''
import gui.native
def loadfont(fontpath, private = True, enumerable = False):
    # Mac stub: font activation is not implemented on this platform yet (see
    # the ATS notes at the top of this module); always reports failure.
    # `private`/`enumerable` presumably mirror the Windows
    # AddFontResourceEx-style flags — confirm against the win32 counterpart.
    gui.native.notImplemented()
    return False
def unloadfont(fontpath):
    # Mac stub: font deactivation is not implemented yet; always reports
    # failure so callers can fall back gracefully.
    gui.native.notImplemented()
    return False
"mdougherty@tagged.com"
] | mdougherty@tagged.com |
a69a510fba4f2708d9033225e05685db3b0c696d | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-417879439/_imp.py | 9b5c6a7460b88191080182531faaf5de3ec605ae | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 5,612 | py | # encoding: utf-8
# module _imp
# from (built-in)
# by generator 1.147
""" (Extremely) low-level import machinery bits as used by importlib and imp. """
# no imports
# functions
# IDE-generated skeletons for _imp's C-level import machinery; the real
# implementations live inside the CPython interpreter, so every body is `pass`.

def acquire_lock(*args, **kwargs): # real signature unknown
    """
    Acquires the interpreter's import lock for the current thread.
    This lock should be used by import hooks to ensure thread-safety when importing
    modules. On platforms without threads, this function does nothing.
    """
    pass

def create_builtin(*args, **kwargs): # real signature unknown
    """ Create an extension module. """
    pass

def create_dynamic(*args, **kwargs): # real signature unknown
    """ Create an extension module. """
    pass

def exec_builtin(*args, **kwargs): # real signature unknown
    """ Initialize a built-in module. """
    pass

def exec_dynamic(*args, **kwargs): # real signature unknown
    """ Initialize an extension module. """
    pass

def extension_suffixes(*args, **kwargs): # real signature unknown
    """ Returns the list of file suffixes used to identify extension modules. """
    pass

def get_frozen_object(*args, **kwargs): # real signature unknown
    """ Create a code object for a frozen module. """
    pass

def init_frozen(*args, **kwargs): # real signature unknown
    """ Initializes a frozen module. """
    pass

def is_builtin(*args, **kwargs): # real signature unknown
    """ Returns True if the module name corresponds to a built-in module. """
    pass

def is_frozen(*args, **kwargs): # real signature unknown
    """ Returns True if the module name corresponds to a frozen module. """
    pass

def is_frozen_package(*args, **kwargs): # real signature unknown
    """ Returns True if the module name is of a frozen package. """
    pass

def lock_held(*args, **kwargs): # real signature unknown
    """
    Return True if the import lock is currently held, else False.
    On platforms without threads, return False.
    """
    pass

def release_lock(*args, **kwargs): # real signature unknown
    """
    Release the interpreter's import lock.
    On platforms without threads, this function does nothing.
    """
    pass

def _fix_co_filename(*args, **kwargs): # real signature unknown
    """
    Changes code.co_filename to specify the passed-in file path.
    code
    Code object to change.
    path
    File path to use.
    """
    pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n All methods are either class or static methods to avoid the need to\\n instantiate the class.\\n\\n ', 'module_repr': <staticmethod object at 0x7fd1b260b048>, 'find_spec': <classmethod object at 0x7fd1b260b080>, 'find_module': <classmethod object at 0x7fd1b260b0b8>, 'create_module': <classmethod object at 0x7fd1b260b0f0>, 'exec_module': <classmethod object at 0x7fd1b260b128>, 'get_code': <classmethod object at 0x7fd1b260b198>, 'get_source': <classmethod object at 0x7fd1b260b208>, 'is_package': <classmethod object at 0x7fd1b260b278>, 'load_module': <classmethod object at 0x7fd1b260b2b0>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
__spec__ = None # (!) real value is "ModuleSpec(name='_imp', loader=<class '_frozen_importlib.BuiltinImporter'>)"
| [
"sarthakpatidar15@gmail.com"
] | sarthakpatidar15@gmail.com |
a92353589502f2b857fbde712c4b2740871f7138 | e68c3cbb9d6291fcdd51adae8a55616dcfafe55c | /spf/mr/lambda_/visitor/get_all_predicates.py | 43cd13146bb18cb49f553127deab68a8e4eee648 | [] | no_license | Oneplus/pyspf | 26126f5094065960d5f034fea2be4709aa1a4c50 | 175f90b4f837aa60fd660cba850d10a82dd578a1 | refs/heads/master | 2016-08-12T15:18:25.606712 | 2015-11-22T02:49:07 | 2015-11-22T02:49:07 | 45,725,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | #!/usr/bin/env python
from spf.mr.language.type_.complex_type import ComplexType
from spf.mr.lambda_.logic_language_services import LogicLanguageServices
from spf.mr.lambda_.visitor.api import LogicalExpressionVisitorI
class GetAllPredicates(LogicalExpressionVisitorI):
    """Visitor that walks a logical expression and collects every predicate
    constant in it, excluding coordination and array bookkeeping predicates."""

    def __init__(self):
        # Accumulates the logical-constant predicates found during the walk.
        self.predicates = set()

    @staticmethod
    def of(expr):
        """Convenience entry point: visit *expr* and return its predicate set."""
        visitor = GetAllPredicates()
        visitor.visit(expr)
        return visitor.predicates

    def visit_lambda(self, lambda_):
        # Recurse into both the bound argument and the body.
        lambda_.get_argument().accept(self)
        lambda_.get_body().accept(self)

    def visit_literal(self, literal):
        # Visit the predicate itself, then each of its arguments.
        literal.get_predicate().accept(self)
        for arg in literal.get_arguments():
            arg.accept(self)

    def visit_logical_constant(self, logical_constant):
        # Only constants with a complex (functional) type count as predicates;
        # coordination (and/or) and array index/sub helpers are filtered out.
        if (isinstance(logical_constant.get_type(), ComplexType) and
            not LogicLanguageServices.is_coordination_predicate(logical_constant) and
            not LogicLanguageServices.is_array_index_predicate(logical_constant) and
            not LogicLanguageServices.is_array_sub_predicate(logical_constant)):
            self.predicates.add(logical_constant)

    def visit_logical_expression(self, logical_expr):
        logical_expr.accept(self)

    def visit_variable(self, variable):
        # Variables carry no predicate information; nothing to collect.
        return
| [
"oneplus.lau@gmail.com"
] | oneplus.lau@gmail.com |
6698430d0534a3155967eb8132b19896306de410 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/1db4d26d8e394cf98ebb2776a7ba6fe5.py | c62d39d883d3124df740dd392844ba4d555c4865 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 427 | py | class Bob:
def hey(self, msg):
if self.is_silence(msg): return "Fine. Be that way!"
elif self.is_shout(msg): return "Woah, chill out!"
elif self.is_question(msg): return "Sure."
else: return "Whatever."
def is_silence(self, msg): return msg is None or msg.strip() == ''
def is_shout(self, msg): return msg.isupper()
def is_question(self, msg): return msg.endswith("?")
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
b24b55082c3a32467c6afbcd5ff6a5a3d8a35825 | abad82a1f487c5ff2fb6a84059a665aa178275cb | /Codewars/8kyu/sum-arrays/Python/test.py | faa9af95abd70d991ed0505af0f19b5b5af928ae | [
"MIT"
] | permissive | RevansChen/online-judge | 8ae55f136739a54f9c9640a967ec931425379507 | ad1b07fee7bd3c49418becccda904e17505f3018 | refs/heads/master | 2021-01-19T23:02:58.273081 | 2019-07-05T09:42:40 | 2019-07-05T09:42:40 | 88,911,035 | 9 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | # Python - 3.6.0
test.describe('Testing sum_array')
test.assert_equals(sum_array([]), 0)
test.assert_equals(sum_array([1, 2, 3]), 6)
test.assert_equals(sum_array([1.1, 2.2, 3.3]), 6.6)
test.assert_equals(sum_array([4, 5, 6]), 15)
test.assert_equals(sum_array(range(101)), 5050)
| [
"d79523@hotmail.com"
] | d79523@hotmail.com |
1f0a5fb8f0d658bce1db0b8ddd311f764e35e0b3 | d6eca1b4b056beb41ac494db7399e1f146099c97 | /chapter7/tickets.py | ffd2252421041380e52db53708e975b7c02002bf | [] | no_license | liangsongyou/python-crash-course-code | 15090b48d77de1115bfaaaa6e5638a9bb9b3c7cc | f369e18030f2aafe358dd0fab1e479ca7bf4ceb8 | refs/heads/master | 2021-05-08T06:42:29.147923 | 2017-08-11T06:41:30 | 2017-08-11T06:41:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py |
# Ticket pricing loop: free up to age 3, $10 for ages 4-12, $15 for 13-99;
# entering an age of 100 or more ends the loop (the original exit condition,
# kept for compatibility).  NOTE: non-numeric input still raises ValueError.
while True:
    age = int(input('Enter your age: '))
    if age <= 3:
        print('Your ticket is free.')  # fixed user-facing typo: "ticked"
    elif age <= 12:  # elif already guarantees age > 3
        print('Your ticket costs $10.')
    elif age < 100:
        print('Your ticket costs $15.')
    else:
        break
"ramzanm461@gmail.com"
] | ramzanm461@gmail.com |
92256c004ae0664949bd41cfb353f346ebcd4d51 | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /third_party/blink/tools/blinkpy/tool/commands/analyze_baselines_unittest.py | e4f87d47ea4bb316fe05d5395299a3a5e1ec8af5 | [
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 1,865 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
from blinkpy.common.checkout.baseline_optimizer import BaselineOptimizer
from blinkpy.tool.commands.analyze_baselines import AnalyzeBaselines
from blinkpy.tool.commands.rebaseline_unittest import BaseTestCase
class _FakeOptimizer(BaselineOptimizer):
    """Optimizer stub: reports one canned result for text baselines only."""

    def read_results_by_directory(self, baseline_name):
        # Only *.txt baselines have (fake) results; everything else is empty.
        if not baseline_name.endswith('txt'):
            return {}
        return {'LayoutTests/passes/text.html': '123456'}
class TestAnalyzeBaselines(BaseTestCase):
    """Tests for the analyze-baselines command.

    Uses _FakeOptimizer so no real baseline files are read; command output
    is captured into ``self.lines`` instead of being printed.
    """
    command_constructor = AnalyzeBaselines

    def setUp(self):
        super(TestAnalyzeBaselines, self).setUp()
        self.port = self.tool.port_factory.get('test')
        # Pin every port lookup to the single test port created above.
        self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
        self.lines = []
        self.command._optimizer_class = _FakeOptimizer
        # Capture output lines rather than writing them to stdout.
        self.command._write = (lambda msg: self.lines.append(msg))

    def test_default(self):
        # A *.txt baseline resolves to the canned fake result.
        self.command.execute(optparse.Values(dict(suffixes='txt', missing=False, platform=None)), ['passes/text.html'], self.tool)
        self.assertEqual(self.lines,
                         ['passes/text-expected.txt:',
                          ' (generic): 123456'])

    def test_missing_baselines(self):
        # With --missing, suffixes that have no baseline are reported too.
        self.command.execute(
            optparse.Values(
                dict(
                    suffixes='png,txt',
                    missing=True,
                    platform=None)),
            ['passes/text.html'],
            self.tool)
        self.assertEqual(self.lines,
                         ['passes/text-expected.png: (no baselines found)',
                          'passes/text-expected.txt:',
                          ' (generic): 123456'])
"arnaud@geometry.ee"
] | arnaud@geometry.ee |
608b5e977d1b0a6a8f79dfba90f9ab04016f5ca6 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /lightsail_write_3/disk_attach.py | b938c3125f8d75bda179ba8d7dde6dc7059c53e4 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,459 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_three_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/attach-disk.html
if __name__ == '__main__':
    # Related AWS CLI sub-commands, for reference:
    """
    create-disk : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/create-disk.html
    delete-disk : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/delete-disk.html
    detach-disk : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/detach-disk.html
    get-disk : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/get-disk.html
    get-disks : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/get-disks.html
    """
    # Help text shown to the user describing the three required parameters.
    parameter_display_string = """
    # disk-name : The unique Lightsail disk name (e.g., my-disk ).
    # instance-name : The name of the Lightsail instance where you want to utilize the storage disk.
    # disk-path : The disk path to expose to the instance (e.g., /dev/xvdf ).
    """
    add_option_dict = {}
    add_option_dict["parameter_display_string"] = parameter_display_string
    # ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
    # Delegates prompting and execution of `aws lightsail attach-disk`.
    write_three_parameter("lightsail", "attach-disk", "disk-name", "instance-name", "disk-path", add_option_dict)
| [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
5dadcf6769ca9cfb7e4763f9392c7c09c08719c0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_betrayals.py | 05743bfbaf3cff77441e9c0efd52c66000980796 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _BETRAYALS():
def __init__(self,):
self.name = "BETRAYALS"
self.definitions = betrayal
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['betrayal']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
c3c01681e8c4b575ef58c93015e09cc5d33ba439 | c618bbf2719431999b1007461df0865bab60c883 | /docs/examples/use_cases/tensorflow/efficientdet/pipeline/dali/ops_util.py | 95646d590d6ac4f04b21f56c818feb8efd9f31f9 | [
"Apache-2.0"
] | permissive | NVIDIA/DALI | 3d0d061135d19e092647e6522046b2ff23d4ef03 | 92ebbe5c20e460050abd985acb590e6c27199517 | refs/heads/main | 2023-09-04T01:53:59.033608 | 2023-09-01T13:45:03 | 2023-09-01T13:45:03 | 135,768,037 | 4,851 | 648 | Apache-2.0 | 2023-09-12T18:00:22 | 2018-06-01T22:18:01 | C++ | UTF-8 | Python | false | false | 5,776 | py | # Copyright 2021 Kacper Kluk, Paweł Anikiel, Jagoda Kamińska. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import nvidia.dali as dali
def input_tfrecord(
    tfrecord_files, tfrecord_idxs, device, shard_id, num_shards, random_shuffle=True
):
    """Build DALI graph nodes reading object-detection TFRecord shards.

    Returns a 5-tuple of graph nodes:
    (decoded RGB images, bboxes as [x1, y1, x2, y2] rows, int32 class
    labels, float widths, float heights). Image decoding runs on the GPU
    ("mixed" decoder) when ``device == "gpu"``.
    """
    inputs = dali.fn.readers.tfrecord(
        path=tfrecord_files,
        index_path=tfrecord_idxs,
        # TF Example schema for images plus per-object boxes/labels.
        features={
            "image/encoded": dali.tfrecord.FixedLenFeature(
                (), dali.tfrecord.string, ""
            ),
            "image/height": dali.tfrecord.FixedLenFeature((), dali.tfrecord.int64, -1),
            "image/width": dali.tfrecord.FixedLenFeature((), dali.tfrecord.int64, -1),
            "image/object/bbox/xmin": dali.tfrecord.VarLenFeature(
                dali.tfrecord.float32, 0.0
            ),
            "image/object/bbox/xmax": dali.tfrecord.VarLenFeature(
                dali.tfrecord.float32, 0.0
            ),
            "image/object/bbox/ymin": dali.tfrecord.VarLenFeature(
                dali.tfrecord.float32, 0.0
            ),
            "image/object/bbox/ymax": dali.tfrecord.VarLenFeature(
                dali.tfrecord.float32, 0.0
            ),
            "image/object/class/label": dali.tfrecord.VarLenFeature(
                dali.tfrecord.int64, 0
            ),
        },
        shard_id=shard_id,
        num_shards=num_shards,
        random_shuffle=random_shuffle,
    )
    images = dali.fn.decoders.image(
        inputs["image/encoded"],
        device="mixed" if device == "gpu" else "cpu",
        output_type=dali.types.RGB,
    )
    xmin = inputs["image/object/bbox/xmin"]
    xmax = inputs["image/object/bbox/xmax"]
    ymin = inputs["image/object/bbox/ymin"]
    ymax = inputs["image/object/bbox/ymax"]
    # Stack the four coordinate vectors, then transpose so each object is
    # one [x1, y1, x2, y2] row.
    bboxes = dali.fn.transpose(dali.fn.stack(xmin, ymin, xmax, ymax), perm=[1, 0])
    classes = dali.fn.cast(inputs["image/object/class/label"], dtype=dali.types.INT32)
    return (
        images,
        bboxes,
        classes,
        dali.fn.cast(inputs["image/width"], dtype=dali.types.FLOAT),
        dali.fn.cast(inputs["image/height"], dtype=dali.types.FLOAT),
    )
def input_coco(
    images_path, annotations_path, device, shard_id, num_shards, random_shuffle=True
):
    """Build DALI graph nodes reading a COCO dataset.

    Returns (decoded RGB images, normalized ltrb boxes, class labels,
    float widths, float heights). Decoding runs on the GPU ("mixed"
    decoder) when ``device == "gpu"``.
    """
    jpegs, boxes, labels = dali.fn.readers.coco(
        file_root=images_path,
        annotations_file=annotations_path,
        ratio=True,
        ltrb=True,
        shard_id=shard_id,
        num_shards=num_shards,
        random_shuffle=random_shuffle,
    )
    decoded = dali.fn.decoders.image(
        jpegs,
        device="mixed" if device == "gpu" else "cpu",
        output_type=dali.types.RGB,
    )
    # peek_image_shape reports (H, W, C) without decoding the image.
    dims = dali.fn.peek_image_shape(jpegs, dtype=dali.types.FLOAT)
    return decoded, boxes, labels, dims[1], dims[0]
def normalize_flip(images, bboxes, p=0.5):
    """Randomly mirror images with probability *p* and normalize them to
    ImageNet channel statistics (NHWC output); boxes are flipped to match."""
    do_mirror = dali.fn.random.coin_flip(probability=p)
    normalized = dali.fn.crop_mirror_normalize(
        images,
        mirror=do_mirror,
        mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
        std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
        output_layout=dali.types.NHWC,
    )
    flipped_boxes = dali.fn.bb_flip(bboxes, horizontal=do_mirror, ltrb=True)
    return normalized, flipped_boxes
def gridmask(images, widths, heights):
    """Randomly apply the GridMask augmentation (50% of samples).

    The grid ratio is 0.4 when the coin flip enables masking and 0 (no-op)
    otherwise; the tile size is drawn between 0.3*width and 0.5*height
    (whichever bound is smaller/larger per image).
    """
    enabled = dali.fn.random.coin_flip()
    ratio = 0.4 * enabled  # ratio 0 disables the mask for this sample
    angle = dali.fn.random.normal(mean=-1, stddev=1) * 10.0 * (math.pi / 180.0)
    lo = dali.math.min(0.5 * heights, 0.3 * widths)
    hi = dali.math.max(0.5 * heights, 0.3 * widths)
    tile = dali.fn.cast(
        (dali.fn.random.uniform(range=[0.0, 1.0]) * (hi - lo) + lo),
        dtype=dali.types.INT32,
    )
    # Bug fix: the original computed the grid-masked images but returned the
    # untouched input, so the augmentation never took effect.
    return dali.fn.grid_mask(images, ratio=ratio, angle=angle, tile=tile)
def random_crop_resize(
    images, bboxes, classes, widths, heights, output_size, scaling=[0.1, 2.0]
):
    """Randomly scale an image, then crop an ``output_size`` window that
    keeps at least one box; boxes/classes are filtered to the crop.

    NOTE(review): the mutable default ``scaling=[0.1, 2.0]`` is never
    mutated here, but a tuple would be the safer convention.
    """
    if scaling is None:
        scale_factor = 1.0
    else:
        scale_factor = dali.fn.random.uniform(range=scaling)
    sizes = dali.fn.stack(heights, widths)
    # Scale so the image fits scale_factor * output_size while preserving
    # aspect ratio (limited by the tighter of the two dimensions).
    image_scale = dali.math.min(
        scale_factor * output_size[0] / widths,
        scale_factor * output_size[1] / heights,
    )
    scaled_sizes = dali.math.floor(sizes * image_scale + 0.5)
    images = dali.fn.resize(
        images,
        size=scaled_sizes
    )
    # Pick a crop window that retains at least one bounding box.
    anchors, shapes, bboxes, classes = dali.fn.random_bbox_crop(
        bboxes,
        classes,
        crop_shape=output_size,
        input_shape=dali.fn.cast(scaled_sizes, dtype=dali.types.INT32),
        bbox_layout="xyXY",
        allow_no_crop=False,
        total_num_attempts=64,
    )
    # Cut the chosen window out of the resized image, padding when the crop
    # extends past the image border.
    images = dali.fn.slice(
        images,
        anchors,
        shapes,
        normalized_anchor=False,
        normalized_shape=False,
        out_of_bounds_policy="pad"
    )
    return (
        images,
        bboxes,
        classes,
    )
def bbox_to_effdet_format(bboxes, image_size):
    """Convert normalized [x1, y1, x2, y2] boxes to absolute
    [y1*h, x1*w, y2*h, x2*w] (EfficientDet) coordinates."""
    width = image_size[0]
    height = image_size[1]
    # Row-major 4x4 matrix: swaps x/y pairs and scales to pixel units.
    matrix = [
        0.0, height, 0.0, 0.0,
        width, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, height,
        0.0, 0.0, width, 0.0,
    ]
    return dali.fn.coord_transform(bboxes, M=matrix)
| [
"noreply@github.com"
] | NVIDIA.noreply@github.com |
88cab9cb1c0d4c437de927380f3ad17b376e84de | 9e8d98c48035d4ee61fa930c324c822a61e5ae55 | /_examples/chebyshevinputs.py | fd844e75495d4edbc26e2c045064fd1e94220c7e | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | GRSEB9S/mystic | 59ac0c284a19f7b685a98420cd49d21bb10ff0cd | 748e0030c8d7d8b005f2eafa17a4581c2b3ddb47 | refs/heads/master | 2021-08-14T07:11:04.439139 | 2017-11-14T23:49:22 | 2017-11-14T23:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | #!/usr/bin/env python
#
# Author: Alta Fang (altafang @caltech and alta @princeton)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/mystic/blob/master/LICENSE
"""
chebyshevinputs.py -- cost function container module for NelderMeadSimplexSolver
and PowellDirectionalSolver for testsolvers_pyre.py
"""
from mystic.models.poly import chebyshev8cost as cost
from mystic.models.poly import chebyshev8coeffs
from mystic.termination import *
ND = 9         # number of Chebyshev-8 polynomial coefficients to fit
maxiter = 999  # iteration cap handed to the solver
from numpy import inf
import random
from mystic.tools import random_seed
random_seed(123)  # deterministic perturbation for reproducible runs
# Start point: each exact Chebyshev coefficient perturbed by U[-5, 5).
x0 = [random.uniform(-5,5) + chebyshev8coeffs[i] for i in range(ND)]
# used with SetStrictRanges
min_bounds = [ 0,-1,-300,-1, 0,-1,-100,-inf,-inf]
max_bounds = [200, 1, 0, 1,200, 1, 0, inf, inf]
termination = CandidateRelativeTolerance()
#termination = VTR()
#termination = ChangeOverGeneration()
#termination = NormalizedChangeOverGeneration()
# End of file
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
05778ada3455877fbc552bfc4121758fc5656931 | 31fb7c74b94e46a325e6b05501c6972a401cf423 | /PYTHON/BASIC_PYTHON/수업내용/02/02-004.py | 817847885b9201127341bbd8b38a25796a623130 | [] | no_license | superf2t/TIL | f2dacc30d6b89f3717c0190ac449730ef341f6a4 | cadaaf952c44474bed9b8af71e70754f3dbf86fa | refs/heads/master | 2022-04-10T13:55:24.019310 | 2019-12-12T11:15:31 | 2019-12-12T11:15:31 | 268,215,746 | 1 | 0 | null | 2020-05-31T05:32:46 | 2020-05-31T05:32:46 | null | UTF-8 | Python | false | false | 132 | py | #02-004.py
print('yyyy', 'mm', 'dd')                   # default sep: "yyyy mm dd"
print('2022', '02', '20', sep='-', end='')  # "2022-02-20", no trailing newline
print(' -- BIG EVENT DAY --')               # continues on the same line
| [
"noreply@github.com"
] | superf2t.noreply@github.com |
8802f191cac1c6ef42a71b05affacdd0c7c9eebf | cc231776124e9b596e4d9557ec09f9275d15eb20 | /example/DjangoApp/wsgi.py | 3b1bae77e146866ec61b9f61a19834583fb350b3 | [
"MIT"
] | permissive | grengojbo/docker-django | 2c742bc9f46939b6614b2f49a0981b848c250851 | 8e1f4b3ce622bc1fd3a9127f4f3519ace7aeee5b | refs/heads/master | 2016-09-15T18:20:41.607328 | 2014-09-12T12:51:37 | 2014-09-12T12:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py | """
WSGI config for fiber project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import SocketServer
from wsgiref import handlers
# Silence per-request error reporting from the socket server and the WSGI
# handler. NOTE(review): this hides genuine tracebacks — confirm it is
# intentional before relying on it in production.
SocketServer.BaseServer.handle_error = lambda *args, **kwargs: None
handlers.BaseHandler.log_exception = lambda *args, **kwargs: None
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "fiber.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoApp.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| [
"oleg.dolya@gmail.com"
] | oleg.dolya@gmail.com |
9451e8ae129916ff18db2c412765cd8c447a2097 | f3c891e43aa828ad0653e6c6bf918f46b78ad7b4 | /setup.py | 5e6d4a4c8d2416841ef9bb3af7755c0fd3db1d10 | [] | no_license | ojustino/pywwt-web | 6a655c1b30c43c2c44ab425a4b815345831e1642 | db70c20da496c52f400cdae0f301d7b5940f530a | refs/heads/master | 2021-06-16T16:08:50.804918 | 2017-11-27T16:40:18 | 2017-11-27T16:40:18 | 112,221,435 | 0 | 0 | null | 2017-11-27T16:38:31 | 2017-11-27T16:38:30 | null | UTF-8 | Python | false | false | 5,854 | py | from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'js')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'WorldWideTelescope Jupyter widget'
def js_prerelease(command, strict=False):
    """decorator for building minified js/css prior to another command

    Wraps a setuptools command class so the ``jsdeps`` (npm) command runs
    first. When *strict* is True, or built targets are missing, a build
    failure aborts; otherwise it is logged and the wrapped command runs.
    """
    class DecoratedCommand(command):
        def run(self):
            jsdeps = self.distribution.get_command_obj('jsdeps')
            if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
                # sdist, nothing to do
                command.run(self)
                return
            try:
                self.distribution.run_command('jsdeps')
            except Exception as e:
                missing = [t for t in jsdeps.targets if not os.path.exists(t)]
                if strict or missing:
                    log.warn('rebuilding js and css failed')
                    if missing:
                        log.error('missing files: %s' % missing)
                    raise e
                else:
                    # Best-effort build: tolerate failure when all targets exist.
                    log.warn('rebuilding js and css failed (not a problem)')
                    log.warn(str(e))
            command.run(self)
            update_package_data(self.distribution)
    return DecoratedCommand
def update_package_data(distribution):
    """update package_data to catch changes during setup"""
    # Re-run build_py option finalization so package_data picks up files
    # generated after setup() started.
    build_cmd = distribution.get_command_obj('build_py')
    build_cmd.finalize_options()
class NPM(Command):
    """Custom setuptools command that runs ``npm install`` to build the
    JS assets, then verifies the expected output files exist."""
    description = 'install package.json dependencies using npm'
    user_options = []
    # Directory npm installs into, and the build artifacts we require.
    node_modules = os.path.join(node_root, 'node_modules')
    targets = [
        os.path.join(here, 'pywwt_web', 'static', 'extension.js'),
        os.path.join(here, 'pywwt_web', 'static', 'index.js')
    ]

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def get_npm_name(self):
        # Windows ships npm as a .cmd shim.
        npmName = 'npm';
        if platform.system() == 'Windows':
            npmName = 'npm.cmd';
        return npmName;

    def has_npm(self):
        # Probe by invoking `npm --version`; any failure means "not available".
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        npmName = self.get_npm_name();
        try:
            check_call([npmName, '--version'])
            return True
        except:
            return False

    def should_run_npm_install(self):
        # NOTE(review): package_json / node_modules_exists are computed but
        # unused — the decision currently reduces to npm availability.
        package_json = os.path.join(node_root, 'package.json')
        node_modules_exists = os.path.exists(self.node_modules)
        return self.has_npm()

    def run(self):
        has_npm = self.has_npm()
        if not has_npm:
            log.error("`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo")
        env = os.environ.copy()
        env['PATH'] = npm_path
        if self.should_run_npm_install():
            log.info("Installing build dependencies with npm. This may take a while...")
            npmName = self.get_npm_name();
            check_call([npmName, 'install'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
            # Touch node_modules so later freshness checks see it as current.
            os.utime(self.node_modules, None)
        # Fail loudly if any required build artifact is still missing.
        for t in self.targets:
            if not os.path.exists(t):
                msg = 'Missing file: %s' % t
                if not has_npm:
                    msg += '\nnpm is required to build a development version of a widget extension'
                raise ValueError(msg)
        # update package data in case this created new files
        update_package_data(self.distribution)
# Read __version__ from pywwt_web/_version.py without importing the package
# (importing would pull in its runtime dependencies at build time).
version_ns = {}
with open(os.path.join(here, 'pywwt_web', '_version.py')) as f:
    exec(f.read(), {}, version_ns)
# Package metadata; js_prerelease wraps the build commands so the npm build
# runs before Python packaging.
setup_args = {
    'name': 'pywwt_web',
    'version': version_ns['__version__'],
    'description': 'WorldWideTelescope Jupyter widget',
    'long_description': LONG_DESCRIPTION,
    'include_package_data': True,
    # Built JS assets installed as a classic notebook extension.
    'data_files': [
        ('share/jupyter/nbextensions/pywwt_web', [
            'pywwt_web/static/extension.js',
            'pywwt_web/static/index.js',
            'pywwt_web/static/index.js.map',
            'pywwt_web/static/wwt.html',
            'pywwt_web/static/wwt_json_api.js',
        ]),
    ],
    'install_requires': [
        'ipywidgets>=7.0.0',
        'ipyevents',
        'traitlets',
        'astropy',
    ],
    'packages': find_packages(),
    'zip_safe': False,
    'cmdclass': {
        'build_py': js_prerelease(build_py),
        'egg_info': js_prerelease(egg_info),
        # sdist must ship the built JS, so a build failure is fatal here.
        'sdist': js_prerelease(sdist, strict=True),
        'jsdeps': NPM,
    },
    'author': 'Thomas P. Robitaille',
    'author_email': 'thomas.robitaille@gmail.com',
    'url': 'https://github.com/astrofrog/pywwt_web',
    'keywords': [
        'ipython',
        'jupyter',
        'widgets',
    ],
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Framework :: IPython',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Multimedia :: Graphics',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ]
}

setup(**setup_args)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
3d3bc331ab73351b87c588a6cf3ba8fd4c1b2615 | 036a41c913b3a4e7ae265e22a672dd89302d3200 | /0701-0800/0753/0753_Python_1.py | d567ce2e39bf38d32a69b1d68ed59a8f1b2f5b4c | [] | no_license | ChangxingJiang/LeetCode | e76f96ebda68d7ade53575354479cfc33ad4f627 | a2209206cdd7229dd33e416f611e71a984a8dd9e | refs/heads/master | 2023-04-13T15:23:35.174390 | 2021-04-24T05:54:14 | 2021-04-24T05:54:14 | 272,088,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | class Solution:
def crackSafe(self, n: int, k: int) -> str:
visited = set()
def dfs(s1):
visited.add(s1)
for v in range(k):
s2 = s1[1:] + str(v)
if s2 not in visited:
dfs(s2)
stack.append(s1[0])
# 处理其他进制的情况
stack = ["0" * (n - 1)]
dfs("0" * n)
return "".join(stack[::-1])
if __name__ == "__main__":
print(Solution().crackSafe(1, 1)) # 0
print(Solution().crackSafe(1, 2)) # 01
print(Solution().crackSafe(2, 2)) # 00110
print(Solution().crackSafe(3, 2)) # 0011101000
| [
"1278729001@qq.com"
] | 1278729001@qq.com |
6d5cafe13cb9ef0722a1efcabebe9b56f3fa71b8 | bfda3af75d94767a5cb265bd68c17cfbf94e3ee1 | /rabbithole/zombit_infection/solution.py | 89fa08b0dd806b896d3f205eee6feea9361d0b2f | [] | no_license | orenlivne/euler | d0e5b956a46eacfe423fbd6c52918beb91eea140 | 2afdd8bccdc5789c233e955b1ca626cea618eb9b | refs/heads/master | 2020-12-29T02:24:36.479708 | 2016-12-15T21:27:33 | 2016-12-15T21:27:33 | 20,263,482 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | #-------------------------------------------------------------------------------
# Rabbit hole - zombit_infection problem
#-------------------------------------------------------------------------------
INFECTED = -1          # cell marker for an infected zombit
MAX_STRENGTH = 10000   # maximum possible infection strength

class Population(object):
    """2-D grid of zombit strengths with a one-cell sentinel border.

    The border cells hold __DUMMY (> MAX_STRENGTH) so the spreading logic
    never infects them and needs no bounds checks.
    """
    __DUMMY = MAX_STRENGTH + 1

    def __init__(self, population):
        # Store the grid with the sentinel border already applied.
        self.__population = Population.__pad(population)

    def __eq__(self, other):
        return self.__population == other.__population

    def __ne__(self, other):
        return self.__population != other.__population

    def __repr__(self):
        return self.population().__repr__()

    def copy(self):
        # Returns a deep copy of the 2D population array.
        other = Population([[0]])
        other.__population = [row[:] for row in self.__population]
        return other

    def population(self):
        # Strip the sentinel border and return the user-visible grid.
        return [row[1:-1] for row in self.__population[1:-1]]

    def attempt_to_infect(self, i, j, strength):
        # Accounts for padding.
        self.__attempt_to_infect(i+1, j+1, strength)

    def spread_infection(self, strength):
        # One simulation step: every infected cell tries its 4 neighbours.
        n, m = self.__size()
        for i in xrange(1, n):
            for j in xrange(1, m):
                self.__spread_infection_at(i, j, strength)

    def __spread_infection_at(self, i, j, strength):
        p = self.__population
        if p[i][j] == INFECTED:
            self.__attempt_to_infect(i-1, j, strength)
            self.__attempt_to_infect(i+1, j, strength)
            self.__attempt_to_infect(i, j-1, strength)
            self.__attempt_to_infect(i, j+1, strength)

    def __attempt_to_infect(self, i, j, strength):
        # A cell is infected only if its strength is within the zombit's.
        if self.__population[i][j] <= strength: self.__population[i][j] = INFECTED

    @staticmethod
    def __pad(population):
        # Surround the grid with a frame of __DUMMY sentinel cells.
        m = len(population[0])
        d = Population.__DUMMY
        return [[d] * m] + [[d] + row + [d] for row in population] + [[d] * m]

    def __size(self):
        return len(self.__population), len(self.__population[0])
def answer(population, x, y, strength):
    """Infect the zombit at column *x*, row *y*, then spread the infection
    until the grid stops changing; return the final grid."""
    grid = Population(population)
    snapshot = grid.copy()
    grid.attempt_to_infect(y, x, strength)
    # Iterate to a fixed point: stop once a full step changes nothing.
    while grid != snapshot:
        snapshot = grid.copy()
        grid.spread_infection(strength)
    return grid.population()
if __name__ == '__main__':
    # Regression checks: 3x3 grid infected from the corner, and a 5x5 grid
    # infected from (x=2, y=1) with strength 5.
    assert answer([[1, 2, 3], [2, 3, 4], [3, 2, 1]], 0, 0, 2) == [[-1, -1, 3], [-1, 3, 4], [3, 2, 1]]
    assert answer([[6, 7, 2, 7, 6], [6, 3, 1, 4, 7], [0, 2, 4, 1, 10], [8, 1, 1, 4, 9], [8, 7, 4, 9, 9]], 2, 1, 5) == [[6, 7, -1, 7, 6], [6, -1, -1, -1, 7], [-1, -1, -1, -1, 10], [8, -1, -1, -1, 9], [8, 7, -1, 9, 9]]
| [
"oren.livne@gmail.com"
] | oren.livne@gmail.com |
280b56265c17385bb306b00dd2eac116091880da | 632099ac0d895943cbbeb9048a2cdfcd21102411 | /LV2_LX2_LC2_LD2/FaderfoxScript.py | ab0b79d02ffebba36ac72ee6d9b6a6dc837d6ef1 | [] | no_license | Toniigor/AbletonLive9_RemoteScripts | 7f4bbf759a79629584413f6d1797005e8cd7f2ff | fed1e5ee61ea12ea6360107a65a6e666364353ff | refs/heads/master | 2021-01-16T21:19:25.330221 | 2014-06-06T12:33:03 | 2014-06-06T12:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,154 | py | #Embedded file name: /Users/versonator/Jenkins/live/Projects/AppLive/Resources/MIDI Remote Scripts/LV2_LX2_LC2_LD2/FaderfoxScript.py
import Live
from consts import *
import sys
from FaderfoxHelper import FaderfoxHelper
from ParamMap import ParamMap
from Devices import *
class FaderfoxScript:
    """Base Ableton Live remote script for Faderfox controllers.

    Live instantiates this with a ``c_instance`` bridge object; MIDI and
    display events are fanned out to the controller components stored in
    ``self.components``. (Decompiled Python 2 source — see the final
    ``raise`` statement, which uses py2 syntax.)
    """
    __filter_funcs__ = ['update_display', 'log', 'song']
    __module__ = __name__
    __doc__ = 'Automap script for Faderfox controllers'
    __version__ = 'V1.1'
    __name__ = 'Generic Faderfox Script'

    def __init__(self, c_instance):
        self.suffix = ''
        self.is_lv1 = False
        FaderfoxScript.realinit(self, c_instance)

    def realinit(self, c_instance):
        # Real initializer, callable explicitly by subclasses.
        self.c_instance = c_instance
        self.helper = FaderfoxHelper(self)
        self.param_map = ParamMap(self)
        self.mixer_controller = None
        self.device_controller = None
        self.transport_controller = None
        self.components = []
        live = 'Live 6 & 7'
        if self.is_live_5():
            live = 'Live 5'
        self.show_message(self.__name__ + ' ' + self.__version__ + ' for ' + live)
        self.is_lv1 = False

    def is_live_5(self):
        # Live 5's API module exposes an `is_live_5` attribute.
        return hasattr(Live, 'is_live_5')

    def log(self, string):
        pass

    def logfmt(self, fmt, *args):
        pass

    def disconnect(self):
        for c in self.components:
            c.disconnect()

    def application(self):
        return Live.Application.get_application()

    def song(self):
        return self.c_instance.song()

    def suggest_input_port(self):
        return str('')

    def suggest_output_port(self):
        return str('')

    def can_lock_to_devices(self):
        return True

    def lock_to_device(self, device):
        if self.device_controller:
            self.device_controller.lock_to_device(device)

    def unlock_to_device(self, device):
        if self.device_controller:
            self.device_controller.unlock_from_device(device)

    def set_appointed_device(self, device):
        if self.device_controller:
            self.device_controller.set_appointed_device(device)

    def toggle_lock(self):
        self.c_instance.toggle_lock()

    def suggest_map_mode(self, cc_no, channel):
        return Live.MidiMap.MapMode.absolute

    def restore_bank(self, bank):
        pass

    def show_message(self, message):
        # show_message only exists on newer Live versions' bridge object.
        if hasattr(self.c_instance, 'show_message'):
            self.c_instance.show_message(message)

    def instance_identifier(self):
        return self.c_instance.instance_identifier()

    def connect_script_instances(self, instanciated_scripts):
        pass

    def request_rebuild_midi_map(self):
        self.c_instance.request_rebuild_midi_map()

    def send_midi(self, midi_event_bytes):
        self.c_instance.send_midi(midi_event_bytes)

    def refresh_state(self):
        for c in self.components:
            c.refresh_state()

    def build_midi_map(self, midi_map_handle):
        # Rebuild all mappings: clear the parameter map, then let each
        # component register its own MIDI bindings.
        self.log('script build midi map')
        script_handle = self.c_instance.handle()
        self.param_map.remove_mappings()
        for c in self.components:
            self.log('build midi map on %s' % c)
            c.build_midi_map(script_handle, midi_map_handle)

    def update_display(self):
        for c in self.components:
            c.update_display()

    def receive_midi(self, midi_bytes):
        # Dispatch incoming MIDI to components; masks come from `consts`.
        channel = midi_bytes[0] & CHAN_MASK
        status = midi_bytes[0] & STATUS_MASK
        if status == CC_STATUS:
            cc_no = midi_bytes[1]
            cc_value = midi_bytes[2]
            for c in self.components:
                c.receive_midi_cc(channel, cc_no, cc_value)
            self.param_map.receive_midi_cc(channel, cc_no, cc_value)
        elif status == NOTEON_STATUS or status == NOTEOFF_STATUS:
            note_no = midi_bytes[1]
            note_vel = midi_bytes[2]
            for c in self.components:
                c.receive_midi_note(channel, status, note_no, note_vel)
            self.param_map.receive_midi_note(channel, status, note_no, note_vel)
        else:
            # NOTE(review): Python 2 raise syntax from the decompiler;
            # `False or AssertionError` evaluates to AssertionError.
            raise False or AssertionError, 'Unknown MIDI message %s' % str(midi_bytes)
"julien@julienbayle.net"
] | julien@julienbayle.net |
60e5e0f2797c0695cd4072eff7577dd65a303961 | a0dda8be5892a390836e19bf04ea1d098e92cf58 | /视频+刷题/python3/匿名函数的应用.py | 540378ee420e3f304633dae548a39b13d84afdac | [] | no_license | wmm98/homework1 | d9eb67c7491affd8c7e77458ceadaf0357ea5e6b | cd1f7f78e8dbd03ad72c7a0fdc4a8dc8404f5fe2 | refs/heads/master | 2020-04-14T19:22:21.733111 | 2019-01-08T14:09:58 | 2019-01-08T14:09:58 | 164,055,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | '''用匿名函数实现多个功能'''
'''
def sum(a, b, function):
result = function(a, b)
print(result)
sum(22, 11, lambda x, y: x + y)
sum(22, 11, lambda x, y: x - y)
'''
def num(a, b, func):
result = func(a, b)
print(result)
# Read an anonymous-function expression from the user, e.g. "lambda x, y: x - y".
func_new = input("请输入一个新的匿名函数")
# SECURITY: eval() executes arbitrary user input — acceptable only as a
# teaching demo, never with untrusted input.
func_new = eval(func_new)  # convert the text into a callable
num(11, 12, func_new)
| [
"792545884@qq.com"
] | 792545884@qq.com |
b9f8c48b18733c4773a4d947d22798aadc2d97fe | e9aa61aa74eb69f946a66b6ac5a90f12ec744295 | /tests/integration/test_resolve_command.py | d4b4d243f266411ac5114bfc3f14e42ef0a36bfa | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | abueide/lbry | 2c2bc35255f82fade22561d673b299c0df35dfea | 7f5deaf6c80422a30b3714d4bf12e028756ed9fe | refs/heads/master | 2021-01-21T12:58:41.093354 | 2019-04-12T12:58:00 | 2019-04-16T18:18:23 | 181,083,296 | 0 | 0 | MIT | 2019-04-12T21:04:56 | 2019-04-12T21:04:56 | null | UTF-8 | Python | false | false | 3,855 | py | import json
from lbrynet.testcase import CommandTestCase
class ResolveCommand(CommandTestCase):
    """Integration tests for the daemon's `resolve` command."""

    async def test_resolve(self):
        tx = await self.channel_create('@abc', '0.01')
        channel_id = tx['outputs'][0]['claim_id']

        # resolving a channel @abc
        response = await self.resolve('lbry://@abc')
        self.assertSetEqual({'lbry://@abc'}, set(response))
        self.assertIn('certificate', response['lbry://@abc'])
        self.assertNotIn('claim', response['lbry://@abc'])
        self.assertEqual(response['lbry://@abc']['certificate']['name'], '@abc')
        self.assertEqual(response['lbry://@abc']['claims_in_channel'], 0)

        await self.stream_create('foo', '0.01', channel_id=channel_id)
        await self.stream_create('foo2', '0.01', channel_id=channel_id)

        # resolving a channel @abc with some claims in it
        response = await self.resolve('lbry://@abc')
        self.assertSetEqual({'lbry://@abc'}, set(response))
        self.assertIn('certificate', response['lbry://@abc'])
        self.assertNotIn('claim', response['lbry://@abc'])
        self.assertEqual(response['lbry://@abc']['certificate']['name'], '@abc')
        self.assertEqual(response['lbry://@abc']['claims_in_channel'], 2)

        # resolving claim foo within channel @abc
        response = await self.resolve('lbry://@abc/foo')
        self.assertSetEqual({'lbry://@abc/foo'}, set(response))
        claim = response['lbry://@abc/foo']
        self.assertIn('certificate', claim)
        self.assertIn('claim', claim)
        self.assertEqual(claim['claim']['name'], 'foo')
        self.assertEqual(claim['claim']['channel_name'], '@abc')
        self.assertEqual(claim['certificate']['name'], '@abc')
        self.assertEqual(claim['claims_in_channel'], 0)

        # resolving claim foo by itself
        response = await self.resolve('lbry://foo')
        self.assertSetEqual({'lbry://foo'}, set(response))
        claim = response['lbry://foo']
        self.assertIn('certificate', claim)
        self.assertIn('claim', claim)
        self.assertEqual(claim['claim']['name'], 'foo')
        self.assertEqual(claim['claim']['channel_name'], '@abc')
        self.assertEqual(claim['certificate']['name'], '@abc')
        self.assertEqual(claim['claims_in_channel'], 0)

        # resolving from the given permanent url
        new_response = await self.resolve(claim['claim']['permanent_url'])
        self.assertEqual(new_response[claim['claim']['permanent_url']], claim)

        # resolving multiple at once
        response = await self.resolve(['lbry://foo', 'lbry://foo2'])
        self.assertSetEqual({'lbry://foo', 'lbry://foo2'}, set(response))
        claim = response['lbry://foo2']
        self.assertIn('certificate', claim)
        self.assertIn('claim', claim)
        self.assertEqual(claim['claim']['name'], 'foo2')
        self.assertEqual(claim['claim']['channel_name'], '@abc')
        self.assertEqual(claim['certificate']['name'], '@abc')
        self.assertEqual(claim['claims_in_channel'], 0)

        # resolve has correct depth
        tx_details = await self.blockchain.get_raw_transaction(claim['claim']['txid'])
        self.assertEqual(claim['claim']['depth'], json.loads(tx_details)['confirmations'])

        # resolve handles invalid data
        txid = await self.blockchain_claim_name("gibberish", "cafecafe", "0.1")
        await self.generate(1)
        response = await self.resolve("lbry://gibberish")
        self.assertSetEqual({'lbry://gibberish'}, set(response))
        claim = response['lbry://gibberish']['claim']
        self.assertEqual(claim['name'], 'gibberish')
        self.assertEqual(claim['hex'], 'cafecafe')
        self.assertFalse(claim['decoded_claim'])
        self.assertEqual(claim['txid'], txid)
        self.assertEqual(claim['effective_amount'], "0.1")
self.assertEqual(claim['effective_amount'], "0.1")
| [
"lex@damoti.com"
] | lex@damoti.com |
6463120100067e88f76a9d5be84a539e55539baa | 5682dc024dd37ea0753d57819bab8a4891d6bb31 | /my_tiff_package/my_tiff_package/__init__.py | 34da447454fa182f4e7bc7d7df34feb2f0f2bde8 | [] | no_license | danielballan/reader-intake-adapter | 20424026ecc23f7aa9ab6ae2035988e60b0ca244 | f06ae1c9aef4e8277e81b5903a54d1a124590457 | refs/heads/master | 2022-04-07T05:50:38.877360 | 2020-03-05T15:12:10 | 2020-03-05T15:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,752 | py | import dask.array
import tifffile
class TIFFReader:
    """Lazy TIFF reader backed by dask.

    Accepts an open file object, a filepath, or a filepath glob; ``read``
    returns the pages of all matched files stacked into one dask array.
    """
    # Dotted name of the container type produced by read(); consumed by
    # adapter layers (e.g. intake) to describe this reader's output.
    container = 'dask.array.core.Array'

    def __init__(self, file):
        if isinstance(file, str):
            # file is a filepath or filepath glob
            import os
            if os.path.isfile(file):
                self._tiff_files = [tifffile.TiffFile(file)]
            else:
                import glob
                self._tiff_files = [tifffile.TiffFile(file_)
                                    for file_ in glob.glob(file)]
        else:
            # file is a file buffer
            self._tiff_files = [tifffile.TiffFile(file)]
        self._file = file  # only used in __repr__
        self._closed = False

    def __repr__(self):
        return f"{self.__class__.__name__}({self._file!r})"

    def read(self):
        """Return a dask array stacking every page of every file.

        Raises:
            Closed: if close() has already been called.
        """
        if self._closed:
            raise Closed(f"{self} is closed and can no longer be read.")
        stack = []
        for tf in self._tiff_files:
            assert len(tf.series) == 1  # should be True by construction
            series = tf.series[0]
            dtype = series.dtype
            for page in series.pages:
                # Each page becomes one lazily-evaluated dask chunk.
                stack.append(dask.array.from_delayed(
                    dask.delayed(page.asarray)(),
                    shape=page.shape, dtype=dtype))
        return dask.array.stack(stack)

    def close(self):
        """Close every underlying TIFF file; further read() calls raise Closed."""
        self._closed = True
        for tf in self._tiff_files:
            tf.close()

    def __enter__(self):
        return self

    def __exit__(self, *exc_details):
        self.close()
class Closed(Exception):
    """Raised when a reader is used after it has been closed."""
# intake compatibility
from reader_adapter import adapt # noqa
# Expose an intake-style DataSource class generated from TIFFReader.
TIFFDataSource = adapt(TIFFReader, 'TIFFDataSource')
| [
"dallan@bnl.gov"
] | dallan@bnl.gov |
282f9f494bed1bf028d83c8f4c05f917f6111523 | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/CPSM/Equipment/Wires/SeriesCompensator.py | 3d24c9da21e451acecdfd4f1a62596944c6ded43 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 2,022 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.ConductingEquipment import ConductingEquipment
class SeriesCompensator(ConductingEquipment):
    """Two-terminal series compensation device: a series capacitor or
    reactor, or an AC transmission line without charging susceptance.

    [R9.3] is satisfied by navigation to ConnectivityNode and Substation.
    """

    _attrs = ["r", "x"]
    _attr_types = {"r": float, "x": float}
    _defaults = {"r": 0.0, "x": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []

    def __init__(self, r=0.0, x=0.0, *args, **kw_args):
        """Initialise a new 'SeriesCompensator' instance.

        @param r: Positive sequence resistance.
        @param x: Positive sequence reactance.
        """
        self.r = r  # positive sequence resistance
        self.x = x  # positive sequence reactance

        super(SeriesCompensator, self).__init__(*args, **kw_args)
| [
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
19517ca7ef64c2333ba5aa5f106fb3d0b5e76ce3 | 0b9622c6d67ddcb252a7a4dd9b38d493dfc9a25f | /HackerRank/30daysChallenge/Day21.py | c3c18c68d2565763a4359b71e362ce6b8b0e1447 | [] | no_license | d80b2t/python | eff2b19a69b55d73c4734fb9bc115be1d2193e2d | 73603b90996221e0bcd239f9b9f0458b99c6dc44 | refs/heads/master | 2020-05-21T20:43:54.501991 | 2017-12-24T12:55:59 | 2017-12-24T12:55:59 | 61,330,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | """
Nota Bene: Day 21 was about Generics.
This was actually not a Python-solvable challenge (only C++, C#, Java 7, Java 8 and Swift were offered), so no code was written here and no progress was made.
"""
| [
"npross@lbl.gov"
] | npross@lbl.gov |
b13111418e3b0d4dfe2eaa935cff789e858685ea | a983c40db193d9294ea93628c57f514e0e6e9c2a | /src/shop/api.py | 14c053a442e1c9fa5524098b2ef6d551d6b1c4fe | [
"MIT"
] | permissive | ElinSwedin/foobar-api | 80f4d1dd5e0dabefb80eab77bd92e4e8a277c9b9 | 7ab204894c7579dd3f9dec3d2cee1166eb046199 | refs/heads/develop | 2021-01-19T12:26:27.085054 | 2017-02-17T09:42:11 | 2017-02-17T11:05:38 | 82,311,799 | 2 | 0 | null | 2017-02-17T15:43:49 | 2017-02-17T15:43:49 | null | UTF-8 | Python | false | false | 7,973 | py | import logging
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from . import models, enums, suppliers, exceptions
log = logging.getLogger(__name__)
@transaction.atomic
def create_product(code, name):
    """Create and persist a new product with the given code and name."""
    new_product = models.Product(code=code, name=name)
    new_product.save()
    return new_product
@transaction.atomic
def update_product(id, **kwargs):
    """Update the product with the given id.

    Every keyword argument is applied as an attribute before saving.
    """
    product_obj = models.Product.objects.get(id=id)
    for field_name, field_value in kwargs.items():
        setattr(product_obj, field_name, field_value)
    product_obj.save()
def get_product(id):
    """Fetch the product with the given id.

    Returns None instead of raising when no such product exists.
    """
    products = models.Product.objects
    try:
        return products.get(id=id)
    except models.Product.DoesNotExist:
        return None
def get_product_transactions_by_ref(reference):
    """Return product transactions recorded against the given reference object."""
    content_type = ContentType.objects.get_for_model(reference)
    return models.ProductTransaction.objects.filter(
        reference_ct=content_type,
        reference_id=reference.pk,
    )
@transaction.atomic
def create_product_transaction(product_id, trx_type, qty, reference=None):
    """Create a product transaction for the given product.

    The product quantity is updated automatically as a side effect of
    creating the transaction. An optional ``reference`` object is linked
    via a generic (content-type, pk) pair.
    """
    product_obj = models.Product.objects.get(id=product_id)
    if reference is None:
        reference_ct = None
        reference_id = None
    else:
        reference_ct = ContentType.objects.get_for_model(reference)
        reference_id = reference.pk
    return product_obj.transactions.create(
        trx_type=trx_type,
        qty=qty,
        reference_ct=reference_ct,
        reference_id=reference_id,
    )
@transaction.atomic
def cancel_product_transaction(trx_id):
    """Mark the product transaction with the given id as canceled."""
    transaction_obj = models.ProductTransaction.objects.get(id=trx_id)
    transaction_obj.trx_status = enums.TrxStatus.CANCELED
    transaction_obj.save()
def list_products(start=None, limit=None, **kwargs):
    """Returns a list of products matching the criteria.
    Criteria should be passed to the function as keyword arguments.
    Criteria arguments support Django field lookups.
    """
    # NOTE(review): `limit` is used as a slice END index, not a page size —
    # callers wanting `limit` items from `start` would need start + limit.
    # Confirm the intended contract before changing.
    return models.Product.objects.filter(**kwargs)[start:limit]
def list_categories():
    """Return a queryset of every product category."""
    return models.ProductCategory.objects.all()
@transaction.atomic
def get_supplier_product(supplier_id, sku):
    """Return the supplier product for the given SKU.

    If the product does not exist in the local database, it is fetched
    from the supplier's API and cached locally. Returns None when the
    supplier does not know the SKU either.
    """
    try:
        return models.SupplierProduct.objects.get(
            supplier_id=supplier_id,
            sku=sku
        )
    except models.SupplierProduct.DoesNotExist:
        pass
    # Product has not been found in the database. Let's fetch it from
    # the supplier.
    supplier_obj = models.Supplier.objects.get(id=supplier_id)
    supplier_api = suppliers.get_supplier_api(supplier_obj.internal_name)
    product_data = supplier_api.retrieve_product(sku)
    if product_data is None:
        # Bug fix: the original format string was missing its closing
        # parenthesis and the ')' after the second placeholder.
        log.warning('Product not found (sku: %s, supplier: %s)',
                    sku, supplier_id)
        return None
    # Cache the supplier's answer locally for subsequent lookups.
    product_obj = models.SupplierProduct.objects.create(
        supplier_id=supplier_id,
        sku=sku,
        price=product_data.price,
        name=product_data.name
    )
    return product_obj
def parse_report(supplier_internal_name, report_path):
    """Parse a delivery report file with the supplier's API and return its items."""
    supplier_api = suppliers.get_supplier_api(supplier_internal_name)
    return supplier_api.parse_delivery_report(report_path)
@transaction.atomic
def populate_delivery(delivery_id):
    """Populates the delivery with products based on the imported report.

    Items whose SKU cannot be resolved to a supplier product are silently
    skipped; the (possibly partially populated) delivery is returned.
    """
    delivery_obj = models.Delivery.objects.get(id=delivery_id)
    supplier_obj = delivery_obj.supplier
    items = parse_report(supplier_obj.internal_name, delivery_obj.report.path)
    for item in items:
        product_obj = get_supplier_product(supplier_obj.id, item.sku)
        if product_obj is not None:
            # qty_multiplier converts between the supplier's pack unit and
            # our stock unit: scale quantity up, unit price down.
            models.DeliveryItem.objects.create(
                delivery=delivery_obj,
                supplier_product_id=product_obj.id,
                qty=item.qty * product_obj.qty_multiplier,
                price=item.price / product_obj.qty_multiplier
            )
    return delivery_obj
@transaction.atomic
def process_delivery(delivery_id):
    """Adjusts the stock quantities based on the delivery data.

    Creates one INVENTORY transaction per delivered item and locks the
    delivery so it cannot be processed twice.
    """
    delivery_obj = models.Delivery.objects.get(id=delivery_id)
    assert delivery_obj.valid, ('Some of the delivered items are not '
                                'associated with a product in the system.')
    for item in delivery_obj.delivery_items.all():
        supplier_product = item.supplier_product
        create_product_transaction(
            product_id=supplier_product.product.id,
            trx_type=enums.TrxType.INVENTORY,
            qty=item.qty,
            reference=item
        )
    # Prevent the delivery from being applied to stock a second time.
    delivery_obj.locked = True
    delivery_obj.save()
@transaction.atomic
def initiate_stocktaking(chunk_size=10):
    """Initiates a stock-taking procedure for all the products.

    Splits the full product list into chunks of ``chunk_size`` items and
    returns the created Stocktake object.

    Raises:
        exceptions.APIException: if a stock-taking is already in progress.
    """
    stocktake_qs = models.Stocktake.objects
    # Make sure that there is no stock-taking in progress
    if not stocktake_qs.filter(locked=False).count() == 0:
        raise exceptions.APIException('Stock-taking already in progress.')
    stocktake_obj = stocktake_qs.create()
    # Order products by category so that each chunk mostly contains products
    # sharing a category. Products in the same category are most often placed
    # near each other, which should make stock-taking more effective.
    product_objs = list(models.Product.objects.all().order_by('category'))
    for i in range(0, len(product_objs), chunk_size):
        chunk_obj = stocktake_obj.chunks.create()
        chunk_products = product_objs[i:i + chunk_size]
        for p in chunk_products:
            chunk_obj.items.create(product=p)
    return stocktake_obj
@transaction.atomic
def finalize_stocktaking(stocktake_id):
    """Applies the result of stock taking to the stock quantities.

    Every counted item produces a CORRECTION transaction for the difference
    between the counted and the recorded quantity, then the stock-take is
    locked and returned.

    Raises:
        exceptions.APIException: if already finished or any chunk is open.
    """
    stocktake_obj = models.Stocktake.objects.get(id=stocktake_id)
    if stocktake_obj.locked:
        raise exceptions.APIException('Stock-taking already finished.')
    # Make sure that all the chunks are finished
    chunk_objs = stocktake_obj.chunks.all()
    if not all(obj.locked for obj in chunk_objs):
        raise exceptions.APIException('Found unfinished chunks.')
    for chunk_obj in chunk_objs:
        for item_obj in chunk_obj.items.all():
            product_obj = item_obj.product
            # Correction = counted quantity minus currently recorded quantity.
            create_product_transaction(
                product_id=product_obj.id,
                trx_type=enums.TrxType.CORRECTION,
                qty=item_obj.qty - product_obj.qty,
                reference=item_obj
            )
    stocktake_obj.locked = True
    stocktake_obj.save()
    return stocktake_obj
def finalize_stocktake_chunk(chunk_id):
    """Mark the given stock-take chunk as finished and release its owner.

    Raises:
        exceptions.APIException: if the chunk is already locked.
    """
    chunk = models.StocktakeChunk.objects.get(id=chunk_id)
    if chunk.locked:
        raise exceptions.APIException('Chunk already locked.')
    chunk.locked = True
    chunk.owner = None
    chunk.save()
@transaction.atomic
def assign_free_stocktake_chunk(user_id, stocktake_id):
    """Assigns a free stock-take chunk to a user, if any free left.

    If user is already assigned to a chunk, that chunk should be returned.
    Returns None when no unlocked, unowned chunk remains.
    """
    # select_for_update row-locks the chunks for the duration of the
    # surrounding transaction so two users cannot grab the same chunk.
    chunk_qs = models.StocktakeChunk.objects.select_for_update()
    try:
        return chunk_qs.get(
            stocktake_id=stocktake_id,
            owner_id=user_id
        )
    except models.StocktakeChunk.DoesNotExist:
        pass
    chunk_objs = chunk_qs.filter(
        stocktake_id=stocktake_id,
        locked=False,
        owner__isnull=True
    )
    if not chunk_objs:
        return None
    chunk_obj = chunk_objs.first()
    chunk_obj.owner_id = user_id
    chunk_obj.save()
    return chunk_obj
| [
"me@kjagiello.com"
] | me@kjagiello.com |
1c656ad16666594fb3d4c6f4f1e6bba48319f683 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/programmers/level1/문자열다루기기본.py | a210099dc2f10aaf38e4017dbeb93a0c2c6db444 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def isnumber(s):
for i in range(len(s)):
if s[i] < '0' or s[i] > '9':
return False
return True
def solution(s):
    """Return True when s has length 4 or 6 and contains only digits 0-9."""
    if len(s) not in (4, 6):
        return False
    # Inlined ASCII-digit check (equivalent to the isnumber helper).
    return all('0' <= ch <= '9' for ch in s)
| [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
ecb1f30725bfd84a818036a942659d29d1dfdfa4 | c7009093f1e4d5db31d7fb5d876b46f5f9ac5268 | /week2/game_hobit/game.py | 5f8e1cf9d01888c4697f3705b59e8f2cd391fb2d | [] | no_license | mileto94/HackBulgaria | 4ea52ff306c202b9207f66e218ca79082832246a | 414b37dd102a3de5e976d4d97b1b2d95bb253892 | refs/heads/master | 2016-09-06T17:31:04.618614 | 2014-06-24T18:40:27 | 2014-06-24T18:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | import random
from random import randint
class Entity():
    """Base class for a combatant: tracks a name, hit points and an
    optionally equipped weapon."""

    def __init__(self, name, health):
        self.name = name
        self.health = health

    def get_name(self):
        """Return the entity's name."""
        return self.name

    def get_health(self):
        """Return the current hit points."""
        return self.health

    def is_alive(self):
        """Return True while hit points remain above zero."""
        return self.health > 0

    def equip_weapon(self, weapon_for_entity):
        """Attach a weapon to this entity; always reports success."""
        self.weapon = weapon_for_entity
        return True

    def has_weapon(self):
        """Return True if a weapon attribute is present."""
        return hasattr(self, "weapon")

    def take_damage(self, damage_points):
        """Subtract damage from health and return the new total."""
        self.health -= damage_points
        return self.health

    def take_healing(self, healing_points):
        """Heal while health is strictly between 0 and 100.

        Returns the new health, or None when no healing was applied
        (dead, or at/above full health).
        """
        if 0 < self.health < 100:
            self.health += healing_points
            return self.health
        return None

    def attack(self):
        """Return attack damage: the weapon's damage, or 10 bare-handed."""
        return self.weapon.damage if self.has_weapon() else 10
class Hero(Entity):
    """A named protagonist whose nickname forms part of the public title."""

    def __init__(self, name, health, nickname):
        super().__init__(name, health)
        self.nickname = nickname

    def get_nickname(self):
        """Return the hero's nickname."""
        return self.nickname

    def known_as(self):
        """Return the title '<name> the <nickname>'."""
        return self.name + " the " + self.nickname
class Orc(Entity):
    """An entity whose attack damage is scaled by a berserk factor."""

    def __init__(self, name, health, berserk_factor):
        super().__init__(name, health)
        self.berserk_factor = berserk_factor

    def attack(self):
        """Return base attack damage multiplied by the berserk factor."""
        return super().attack() * self.berserk_factor
class Weapon():
    """A weapon with a type label, damage value and critical-strike chance."""

    def __init__(self, Type, damage, critical_strike_percent):
        self.Type = Type
        self.damage = damage
        self.critical_strike_percent = critical_strike_percent

    def getType(self):
        """Return the weapon's type label."""
        return self.Type

    def getCriticalStrikePercent(self):
        """Return the current critical-strike chance."""
        return self.critical_strike_percent

    def critical_hit(self):
        """Roll for a critical hit.

        When the roll lands strictly below the crit chance the chance is
        doubled (and persisted on the weapon). The possibly-updated chance
        is returned either way.
        """
        roll = random.uniform(0, 1)
        if self.critical_strike_percent > roll:
            self.critical_strike_percent *= 2
        return self.critical_strike_percent
class Fight():
    """Simulates a fight between a hero and an orc until one of them dies."""

    def __init__(self, new_hero, new_orc):
        self.hero = new_hero
        self.orc = new_orc

    def who_starts(self):
        """Randomly pick which combatant attacks first (50/50 odds)."""
        if randint(1, 100) <= 50:
            return self.hero
        return self.orc

    def get_opponent(self, first):
        """Return the combatant that is not ``first``."""
        return self.orc if first is self.hero else self.hero

    def simulate_fight(self):
        """Run the fight loop and return the surviving combatant."""
        first = self.who_starts()
        print("%s starts" % first.name)
        second = self.get_opponent(first)

        # Bug fix: the original set the fallback weapon as a CLASS attribute
        # on Entity (setattr(Entity, "weapon", ...)), silently arming every
        # entity in the game. Equip the individual combatants instead.
        for fighter in (first, second):
            if not fighter.has_weapon():
                fighter.equip_weapon(Weapon("his bare hands!", 5, 0.09))

        while True:
            if self._strike(first, second):
                return first
            if self._strike(second, first):
                return second

    @staticmethod
    def _strike(attacker, defender):
        """Perform one attack; return True when the defender dies from it."""
        # Bug fix: the original printed the wrong combatant's name both for
        # the second attacker's message and for the "is hurted" victim line.
        print("%s attacks with %s" % (attacker.name, attacker.weapon.Type))
        defender.take_damage(attacker.attack())
        print("%s is hurted" % defender.name)
        if not defender.is_alive():
            print("%s died" % defender.name)
            return True
        return False
class Dungeon():
    """Loads a dungeon map from a text file and exposes its raw contents."""

    def __init__(self, file_to_read):
        # Context manager guarantees the handle is closed even if read()
        # raises (the original opened and closed the file manually).
        with open(file_to_read, "r") as map_file:
            self.unparsed_map = map_file.read()

    def get_map(self):
        """Return the raw map text."""
        return self.unparsed_map

    def print_map(self):
        """Print the raw map text to stdout."""
        print(self.get_map())
| [
"mileto94@abv.bg"
] | mileto94@abv.bg |
2ed56086a41fca06fb78b34b10bedbfee178a202 | 621ca3f68b088699c42a16051df1d64b6a5ac505 | /virtual/bin/pip3 | da45e50dbe672a6aca5e1632cfb1450927ff6f5d | [
"MIT"
] | permissive | nziokaivy/hood | 2af3bdbf1c258e504da2a4550b524319cab907bb | f82dde0f132330589aacfeefde0229d0bb909b9c | refs/heads/master | 2020-04-30T20:42:45.567319 | 2019-04-08T07:55:05 | 2019-04-08T07:55:05 | 177,075,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | #!/home/ivy/Documents/Moringa-school-projects/core-projects/python/hood/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"nziokaivy@gmail.com"
] | nziokaivy@gmail.com | |
c47fd8fda3a6c52e6bd550bbca685152fa026643 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceBot/Main/forfuckssake.py | 18ffb3c5664225b11ec0c4fd4d80c90d641ea910 | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
@sopel.module.commands('forfuckssake','ffs')
def mainfunction(bot, trigger):
    """IRC command handler for .forfuckssake / .ffs.

    Splits the raw trigger arguments on "&&" and dispatches each part to
    execute_main as a separate command invocation.
    """
    enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'forfuckssake')
    # NOTE(review): this runs the command when enablestatus is FALSY, which
    # reads inverted — confirm spicebot_prerun's contract before changing.
    if not enablestatus:
        # IF "&&" is in the full input, it is treated as multiple commands, and is split
        commands_array = spicemanip(bot, triggerargsarray, "split_&&")
        if commands_array == []:
            commands_array = [[]]
        for command_split_partial in commands_array:
            triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
            execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
    """Send the canned response to the channel the trigger came from."""
    osd(bot, trigger.sender, 'say', "For fuck sakes lizard people, get your shit together!!")
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
2ffe78d090dc035ee3e3c38344a9e74b0442c2ec | 97758972dcbc3ad0e1e6057cdf1e0265c9e26c46 | /circularly_linked_list/tests/test_insert_cyclic_list.py | ac498d7e144659df1c5b77a38f88fb40f13cdf5f | [
"MIT"
] | permissive | ahcode0919/python-ds-algorithms | a4eea3358258e0ec3802aa9bf4470aa81b399d2a | 966565753eba5414903300379db6abac1b80a3d0 | refs/heads/main | 2022-12-23T11:56:21.142540 | 2022-12-19T16:22:24 | 2022-12-19T16:22:24 | 140,489,999 | 0 | 3 | MIT | 2022-12-19T16:22:25 | 2018-07-10T21:40:32 | Python | UTF-8 | Python | false | false | 627 | py | from circularly_linked_list.insert_cyclic_list import insert
from data_structures.singly_linked_list_node import SinglyLinkedListNode
from test_helpers.test_helpers import get_cyclic_list_values
def test_insert():
    """Cover insert() on an empty, a single-node and a multi-node cyclic list."""
    # Inserting into an empty (None) list creates a one-node cycle.
    head = None
    assert get_cyclic_list_values(insert(head, 1)) == [1]
    # Single self-referencing node: the new value ends up after it.
    head = SinglyLinkedListNode(1)
    head.next = head
    assert get_cyclic_list_values(insert(head, 2)) == [1, 2]
    # Three-node cycle 3 -> 4 -> 1 -> (back to 3): inserting 2 yields
    # [3, 4, 1, 2] when read from the returned head.
    head = SinglyLinkedListNode(3)
    head.next = SinglyLinkedListNode(4)
    head.next.next = SinglyLinkedListNode(1)
    head.next.next.next = head
    assert get_cyclic_list_values(insert(head, 2)) == [3, 4, 1, 2]
| [
"noreply@github.com"
] | ahcode0919.noreply@github.com |
745cdf579469254d8a84fa40c6cc5cde7de681e1 | e5755d76e50e902246884310a7781059bd7ff222 | /mongoDB/6.Insert.py | 7dc6eba4175fdbcb45942c935ac3da1f5130d1ee | [] | no_license | Sens3ii/PP2-2020 | 4f1f9c0588476ca415b0ae2efc0f171e826dd3f8 | 3675aa4860f727ecb26360d8624e396663dfc5b2 | refs/heads/master | 2023-05-29T15:02:17.803095 | 2021-06-14T17:05:52 | 2021-06-14T17:05:52 | 236,768,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | import pymongo
myclient = pymongo.MongoClient("mongodb+srv://students:321321@firstcluster-bxilw.gcp.mongodb.net/test?retryWrites=true&w=majority")
mydb = myclient["mydatabase"]
mycol = mydb["students"]
mylist = [
{ "_id": 1, "name": "John", "address": "Highway 37"},
{ "_id": 2, "name": "Peter", "address": "Lowstreet 27"},
{ "_id": 3, "name": "Amy", "address": "Apple st 652"},
{ "_id": 4, "name": "Hannah", "address": "Mountain 21"},
{ "_id": 5, "name": "Michael", "address": "Valley 345"},
{ "_id": 6, "name": "Sandy", "address": "Ocean blvd 2"},
{ "_id": 7, "name": "Betty", "address": "Green Grass 1"},
{ "_id": 8, "name": "Richard", "address": "Sky st 331"},
{ "_id": 9, "name": "Susan", "address": "One way 98"},
{ "_id": 10, "name": "Vicky", "address": "Yellow Garden 2"},
{ "_id": 11, "name": "Ben", "address": "Park Lane 38"},
{ "_id": 12, "name": "William", "address": "Central st 954"},
{ "_id": 13, "name": "Chuck", "address": "Main Road 989"},
{ "_id": 14, "name": "Viola", "address": "Sideway 1633"}
]
x = mycol.insert_many(mylist)
#print a list of the _id values of the inserted documents:
print(x.inserted_ids) | [
"noreply@github.com"
] | Sens3ii.noreply@github.com |
fc16106826b25ab2c3851b6b86707066454ecae6 | 4fc87c7c55d431943eba76caaa76cc889e99bd3f | /npf/core/workflow/models/workflow_mytask.py | 52b87a074885ee3a7de16a31c5cd5377c973edeb | [] | no_license | Bonasolvo/npf-dev-roles | c774359b79642ae9ca2c82daeb0591677bd8e88c | dbde9493f2d23fd238dd3a6d8771bbbc5a650724 | refs/heads/master | 2016-09-01T05:35:50.246086 | 2015-12-15T07:02:40 | 2015-12-15T07:02:40 | 48,026,149 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from npf.core.workflow.models import WorkflowTaskInstance
class WorkflowMyTaskInstance(WorkflowTaskInstance):
    """
    Proxy model for a task instance. Used to filter all tasks down to the
    current user and render the "My tasks" list.
    """
    class Meta:
        # User-facing admin labels; deliberately kept in Russian — these are
        # runtime strings, not comments.
        verbose_name = 'Задача'
        verbose_name_plural = 'Мои задачи'
        proxy = True
| [
"tymashh@Mac-Tymashh.local"
] | tymashh@Mac-Tymashh.local |
b35986bb150d6f28720c88f4ce694446174ca46c | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/development/electron_density_sampling.py | bc7f0cf4ae8f29f9ce2b13518f1c8d09d8f3953c | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,856 | py | from __future__ import absolute_import, division, print_function
from cctbx import xray
from cctbx import miller
from cctbx.command_line.structure_factor_timings import read_structure
import sys
def timings(structure, wing_cutoff=1.e-3):
  """Time FFT structure-factor sampling for n-gaussian vs it1992 form
  factors at several resolutions, using both the exp function and an exp
  lookup table, and print the timing ratios."""
  print("wing_cutoff for following fft calculations: %3.1e"%wing_cutoff)
  for calc_type,exp_table_one_over_step_size in (("exp function:",0),
                                                 ("exp table:",-100)):
    print(calc_type)
    for d_min in [4,3,2,1]:
      structure_ng = structure.deep_copy_scatterers()
      structure_ng.scattering_type_registry(d_min=d_min, table="n_gaussian")
      structure_4g = structure.deep_copy_scatterers()
      structure_4g.scattering_type_registry(table="it1992")
      miller_set = miller.build_set(
        crystal_symmetry=structure,
        d_min=d_min,
        anomalous_flag=False)
      miller_set.show_summary()
      times = []
      # NOTE(review): this inner loop rebinds the `structure` parameter;
      # it works only because the deep copies were taken above.
      for structure in (structure_ng, structure_4g):
        structure.scattering_type_registry().show_summary()
        f_calc_object = xray.structure_factors.from_scatterers(
          miller_set=miller_set,
          wing_cutoff=wing_cutoff,
          exp_table_one_over_step_size=exp_table_one_over_step_size)(
            xray_structure=structure,
            miller_set=miller_set,
            algorithm="fft")
        times.append(f_calc_object.manager().estimate_time_fft.time_sampling)
        print(" %.2f seconds," % times[-1])
      # Ratio of n-gaussian to it1992 sampling time.
      print("d_min=%d: %.2f s / %.2f s" % (d_min, times[0], times[1]), end=' ')
      if (times[1] != 0):
        print("= %.2f" % (times[0] / times[1]), end=' ')
      print()
      sys.stdout.flush()
  print()
def run(args):
  """Command-line entry point: expects exactly one structure file path."""
  assert len(args) == 1
  xray_structure = read_structure(args[0])
  xray_structure.show_summary()
  print()
  timings(structure=xray_structure)

if (__name__ == "__main__"):
  run(sys.argv[1:])
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
1903fd395e39e1a5fe47d28f6a0c5d63f5ac1553 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/errors/types/ad_group_ad_error.py | 350f6684cdb369f03e6e2283d6c52be5fe489255 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,533 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto-plus module registration: declares the protobuf package, the
# marshalling namespace, and the message names this module exports.
__protobuf__ = proto.module(
    package="google.ads.googleads.v9.errors",
    marshal="google.ads.googleads.v9",
    manifest={"AdGroupAdErrorEnum",},
)
class AdGroupAdErrorEnum(proto.Message):
    r"""Container for enum describing possible ad group ad errors.
    """

    class AdGroupAdError(proto.Enum):
        r"""Enum describing possible ad group ad errors."""
        # 0 and 1 follow the proto3 convention for unset/unknown values.
        UNSPECIFIED = 0
        UNKNOWN = 1
        AD_GROUP_AD_LABEL_DOES_NOT_EXIST = 2
        AD_GROUP_AD_LABEL_ALREADY_EXISTS = 3
        AD_NOT_UNDER_ADGROUP = 4
        CANNOT_OPERATE_ON_REMOVED_ADGROUPAD = 5
        CANNOT_CREATE_DEPRECATED_ADS = 6
        CANNOT_CREATE_TEXT_ADS = 7
        EMPTY_FIELD = 8
        RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 9
        AD_TYPE_CANNOT_BE_PAUSED = 10
        AD_TYPE_CANNOT_BE_REMOVED = 11
        CANNOT_UPDATE_DEPRECATED_ADS = 12

# Public re-export list derived from the proto module manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
04e4e7fb2beff305b6436fb10b8bcb32563735f2 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /tests/st/ops/ascend/test_aicpu_ops/test_fused_sparse_ftrl.py | f48235fbcabe2fb6abd5074475895f2115054473 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 1,924 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
# Run in graph mode on Ascend hardware.
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
# FTRL hyper-parameters. NOTE(review): Net below hard-codes lr=0.1 instead
# of reading these module-level values — confirm which is intended.
lr = 0.01
l1 = 0.0
l2 = 0.0
lr_power = -0.5
class Net(nn.Cell):
    """Wraps FusedSparseFtrl with 3x3 float32 var/accum/linear parameters,
    all initialised to ones."""
    def __init__(self):
        super(Net, self).__init__()
        self.fused_sparse_ftrl = P.FusedSparseFtrl(lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="linear")

    def construct(self, grad, indices):
        # Applies one sparse FTRL step; var/accum/linear are updated in place.
        return self.fused_sparse_ftrl(self.var, self.accum, self.linear, grad, indices)
def test_net():
    """Smoke-test FusedSparseFtrl: run one update and print the results."""
    # 3x3 gradient; every sparse index points at row 1 of the parameters.
    gradient = Tensor(np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
                      .reshape([3, 3]).astype(np.float32))
    indices = Tensor(np.ones([3]), mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    # The op mutates the parameters in place; print them for inspection.
    print(net.var.data)
    print(net.accum.data)
    print(net.linear.data)
| [
"513344092@qq.com"
] | 513344092@qq.com |
4976b2b73dfeae906d91da69abf0aeede0d747a4 | b1ba5707a5cbe918d33bc2082b3eb4ff1378c060 | /SDPython/tests/test_sd_Katana/AccessProperties.py | 3b442e9ec2a1a19b233491f26519bb652e48bb65 | [] | no_license | qq781217732/SubstanceDev | 2eb1d9ed48d477cf70c7bfdac2103bb884e9204c | b9ffab0a1b8f3c01783259074940b2712a8142b8 | refs/heads/master | 2023-03-26T00:43:35.047305 | 2021-03-01T04:12:28 | 2021-03-01T04:12:28 | 342,539,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import sd
# Import the required classes.
from sd.api.sdproperty import SDPropertyCategory
from sd.api.sdvalueserializer import SDValueSerializer
# Get and print information regarding the selected nodes.
def printSelectedNodesInfo(nodes):
for node in nodes:
definition = node.getDefinition()
nodeId = node.getIdentifier()
print("node %s, id = %s" % (definition.getLabel(), nodeId))
# Create a list of each property category enumeration item.
categories = [
SDPropertyCategory.Annotation,
SDPropertyCategory.Input,
SDPropertyCategory.Output
]
# Get node properties for each property category.
for category in categories:
props = definition.getProperties(category)
# Get the label and identifier of each property.
for prop in props:
label = prop.getLabel()
propId = prop.getId()
# Get the connection for the currently accessed property.
if prop.isConnectable():
connections = node.getPropertyConnections(prop)
if connections:
print("Propery %s is connected!!!" % label)
continue
# Get the value for the currently accessed property.
value = node.getPropertyValue(prop)
if value:
print("Property %s, id = %s, value = %s" % (label, propId, SDValueSerializer.sToString(value))) | [
"gaoyuyang@senseinn.com"
] | gaoyuyang@senseinn.com |
cca40c38589af36a682c1ef7ba42167804c19b98 | 7867e319f00994767fe748a107d927cf6f3181b8 | /src/pipx/interpreter.py | 6ec093c8237356a8ce3a267b9c1c35594b1509e9 | [
"MIT"
] | permissive | pypa/pipx | c34b687f7b88fe4e7f30971c05c466f6a0f45931 | 248fa37e7a0ea4a70a30a4352c0eb065137d3e15 | refs/heads/main | 2023-08-31T04:27:29.607704 | 2023-08-29T13:39:07 | 2023-08-29T13:39:07 | 151,871,286 | 3,264 | 190 | MIT | 2023-09-05T06:23:55 | 2018-10-06T18:47:46 | Python | UTF-8 | Python | false | false | 2,564 | py | import os
import shutil
import subprocess
import sys
from typing import Optional
from pipx.constants import WINDOWS
from pipx.util import PipxError
def has_venv() -> bool:
    """Report whether the standard-library ``venv`` module is importable."""
    try:
        import venv  # noqa
    except ImportError:
        return False
    return True
# The following code was copied from https://github.com/uranusjr/pipx-standalone
# which uses the same technique to build a completely standalone pipx
# distribution.
#
# If we are running under the Windows embeddable distribution,
# venv isn't available (and we probably don't want to use the
# embeddable distribution as our applications' base Python anyway)
# so we try to locate the system Python and use that instead.
def find_py_launcher_python(python_version: Optional[str] = None) -> Optional[str]:
    """Locate a Python interpreter via the Windows ``py`` launcher.

    Returns the path to the launcher itself when no *python_version* is
    requested, the resolved interpreter path for that version otherwise,
    or ``None`` when the launcher is not on PATH.
    """
    launcher = shutil.which("py")
    if launcher is None or not python_version:
        return launcher
    result = subprocess.run(
        [launcher, f"-{python_version}", "-c", "import sys; print(sys.executable)"],
        capture_output=True,
        text=True,
    )
    # May be an empty string if the launcher could not resolve the version.
    return result.stdout.strip()
def _find_default_windows_python() -> str:
    """Pick a usable base Python on Windows.

    If the running interpreter ships ``venv`` it is used directly; otherwise
    the ``py`` launcher or a ``python`` on PATH is tried. Windows Store stub
    executables (which live under ``WindowsApps`` and do nothing useful) are
    detected and rejected.
    """
    if has_venv():
        return sys.executable
    python = find_py_launcher_python() or shutil.which("python")
    if python is None:
        raise PipxError("No suitable Python found")
    if "WindowsApps" not in python:
        # Not the Store alias; assume it is a real interpreter.
        return python
    # Special treatment to detect a Windows Store stub: run it with -V.
    # https://twitter.com/zooba/status/1212454929379581952
    proc = subprocess.run(
        [python, "-V"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
    )
    # A stub either exits non-zero (covers the 9009 return code) or prints
    # nothing; a real Python prints its version and exits 0.
    if proc.returncode != 0 or not proc.stdout.strip():
        raise PipxError("No suitable Python found")
    return python  # This executable seems to work.
def _get_sys_executable() -> str:
    """Return the default interpreter path for the current platform."""
    # Windows needs extra care because of the embeddable distribution and
    # the Windows Store stub; everywhere else the running interpreter is fine.
    return _find_default_windows_python() if WINDOWS else sys.executable
def _get_absolute_python_interpreter(env_python: str) -> str:
    """Resolve *env_python* to an absolute executable path via PATH lookup.

    Raises PipxError when the name cannot be resolved.
    """
    resolved = shutil.which(env_python)
    if resolved:
        return resolved
    raise PipxError(f"Default python interpreter '{env_python}' is invalid.")
# Module-level default: honour the PIPX_DEFAULT_PYTHON environment variable
# when set (resolved to an absolute path), otherwise pick a suitable system
# interpreter for the current platform.
env_default_python = os.environ.get("PIPX_DEFAULT_PYTHON")
if not env_default_python:
    DEFAULT_PYTHON = _get_sys_executable()
else:
    DEFAULT_PYTHON = _get_absolute_python_interpreter(env_default_python)
| [
"noreply@github.com"
] | pypa.noreply@github.com |
2a8e16d986f346e286ee2aae5a8909a6121c790e | d2c229f74a3ca61d6a22f64de51215d9e30c5c11 | /qiskit/circuit/library/data_preparation/__init__.py | fbd033996c9e0bdefd7806a92bb64a821df9335c | [
"Apache-2.0"
] | permissive | 1ucian0/qiskit-terra | 90e8be8a7b392fbb4b3aa9784c641a818a180e4c | 0b51250e219ca303654fc28a318c21366584ccd3 | refs/heads/main | 2023-08-31T07:50:33.568824 | 2023-08-22T01:52:53 | 2023-08-22T01:52:53 | 140,555,676 | 6 | 1 | Apache-2.0 | 2023-09-14T13:21:54 | 2018-07-11T09:52:28 | Python | UTF-8 | Python | false | false | 2,205 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Data-encoding circuits
======================
In machine learning, pattern recognition and image processing, a **data-encoding circuit**
starts from an initial set of measured data and builds derived values (also known as
**features**) intended to be informative and non-redundant, facilitating the subsequent
learning and generalization steps, and in some cases leading to better human
interpretations.
A feature map is related to **dimensionality reduction**; it involves reducing the amount of
resources required to describe a large set of data. When performing analysis of complex data,
one of the major problems stems from the number of variables involved. Analysis with a large
number of variables generally requires a large amount of memory and computation power, and may
even cause a classification algorithm to overfit to training samples and generalize poorly to new
samples.
When the input data to an algorithm is too large to be processed and is suspected to be redundant
(for example, the same measurement is provided in both pounds and kilograms), then it can be
transformed into a reduced set of features, named a **feature vector**.
The process of determining a subset of the initial features is called **feature selection**.
The selected features are expected to contain the relevant information from the input data,
so that the desired task can be performed by using the reduced representation instead
of the complete initial data.
"""
from .pauli_feature_map import PauliFeatureMap
from .z_feature_map import ZFeatureMap
from .zz_feature_map import ZZFeatureMap
from .state_preparation import StatePreparation
__all__ = ["PauliFeatureMap", "ZFeatureMap", "ZZFeatureMap", "StatePreparation"]
| [
"noreply@github.com"
] | 1ucian0.noreply@github.com |
97690e0df7979136e3cb322a4672832e4770244b | ae646229187ab11607e4889e1cf0e380b26fae5c | /test_joyce_code/limestone/expPatientSize.py | 837f60f645e7ee64eb8ff96888828e8171e295f7 | [] | no_license | aschein/tensor_analysis | cb60caf56713cfb7191c46d3cc20c32ea591d382 | 155754be7fa8cfb97432997cb66aa37b1a7b582b | refs/heads/master | 2021-01-17T07:44:00.657311 | 2014-09-11T20:45:14 | 2014-09-11T20:45:14 | 34,183,143 | 1 | 2 | null | 2018-08-25T20:15:18 | 2015-04-18T21:19:08 | Python | UTF-8 | Python | false | false | 1,898 | py | """
Experiment to evaluate the effect of the size on computation time
"""
import time
import numpy as np
from sklearn.decomposition import RandomizedPCA
import nimfa
import argparse
import CP_APR
import sptensor
import sptenmat
# Load the original data
filename = 'data/hf-tensor-level1-data.dat'
X = sptensor.loadTensor(filename)
# Experiment configuration: factorization rank, iteration budget and the
# number of timing repetitions per method.
R = 40
iters=70
samples=10
pcaModel = RandomizedPCA(n_components=R)
# NOTE(review): this `stats` initialisation is dead code - it is shadowed by
# the identical re-initialisation after the sub-tensor is built below.
stats = np.zeros((1, 6))
# The number of patients to keep is passed on the command line.
parser = argparse.ArgumentParser()
parser.add_argument("pat", type=int, help="number of patients")
args = parser.parse_args()
pn = args.pat
# Keep only tensor entries whose first-mode (patient) index is below pn.
patList = np.arange(pn)
ix = np.in1d(X.subs[:,0].ravel(), patList)
idx = np.where(ix)[0]
xprime = sptensor.sptensor(X.subs[idx, :], X.vals[idx], [pn, X.shape[1], X.shape[2]])
flatX = sptenmat.sptenmat(xprime, [0]).tocsrmat() # matricize along the first mode
# Result rows: rank, iterations, patient count, sample index, method, seconds.
stats = np.zeros((1,6))
## NMF Timing
for k in range(samples):
    startTime = time.time()
    nmfModel = nimfa.mf(flatX, method="nmf", max_iter=iters, rank=R)
    nmfResult = nimfa.mf_run(nmfModel)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "NMF", elapsed])))
## PCA Timing
for k in range(samples):
    startTime = time.time()
    pcaModel.fit(flatX)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "PCA", elapsed])))
## Tensor factorization timing
for k in range(samples):
    startTime = time.time()
    CP_APR.cp_apr(xprime, R, maxiters=iters)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "CP_APR", elapsed])))
# Drop the all-zero seed row and persist the timings '|'-separated.
stats = np.delete(stats, (0), axis=0)
outFile = "results/patient-cpu-{0}.csv".format(pn)
np.savetxt(outFile, stats, fmt="%s", delimiter="|")
# Print the MySQL LOAD DATA statement used to ingest the results (Python 2 print).
print "load data local infile '/home/joyce/workspace/Health/analysis/tensor/{0}' into table comp_metrics fields terminated by '|' ;\n".format(outFile)
| [
"robchen401@gmail.com"
] | robchen401@gmail.com |
201d58374ddb1f5d1ca8815b7e02ca9867fdb3a1 | 95d7291ce528ab40506d111f46c4f243b4b88514 | /backend/home/migrations/0002_load_initial_data.py | 2b9ff4b97b79ec4a4c6c2e7d002e81fe6d862ff6 | [] | no_license | crowdbotics-apps/dared-3-22630 | 8d2974d89edccddffdb39b1ad120e55fcc3092e5 | b6f8edff1e762bddf9f996ff53b72225eab1375c | refs/heads/master | 2023-01-09T18:51:28.888786 | 2020-11-14T20:53:35 | 2020-11-14T20:53:35 | 312,900,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the application's title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Dared 3")
def create_homepage(apps, schema_editor):
    """Create the initial HomePage record with the sample body markup."""
    homepage_model = apps.get_model("home", "HomePage")
    body = """
    <h1 class="display-4 text-center">Dared 3</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    homepage_model.objects.create(body=body)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record at the app's domain."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "dared-3-22630.botics.co"
    defaults = {"name": "Dared 3"}
    if custom_domain:
        # Only set a domain when one has been configured for the app.
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    # Run after the initial home models and the sites framework tables exist.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]
    # Data migration: seed the CustomText, HomePage and Site records.
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9928c7ec02949ebb1ffaf41dc2a1309e05a1b420 | 07fe650b914e6577b42f3214bfdec3834e6e5eee | /gdslib/coupler_ring.py | 83c169ba15dbd3d12ef28cec15e0a6c0b91d02fa | [
"MIT"
] | permissive | sequoiap/gdslib | e2379baac55bb5715183c6e68b69dc6213d009db | 3e6d081a2196e13e89fef45cae5c7de41b96a7fc | refs/heads/master | 2022-08-01T07:54:53.880323 | 2020-05-18T18:26:42 | 2020-05-18T18:26:42 | 265,635,055 | 0 | 0 | MIT | 2020-05-20T17:12:59 | 2020-05-20T17:12:59 | null | UTF-8 | Python | false | false | 629 | py | import pp
from gdslib.load import load
def coupler_ring(c=pp.c.coupler_ring, **kwargs):
    """Return the compact model for half of a ring coupler.

    .. code::

           N0            N1
           |             |
            \           /
             \         /
           ---=========---
        W0    length_x    E0
    """
    return load(c, **kwargs)
if __name__ == "__main__":
    # Demo: sweep 1520-1570 nm and plot one S-parameter magnitude.
    import matplotlib.pyplot as plt
    import numpy as np
    # Wavelength sweep in metres and the corresponding optical frequencies.
    wav = np.linspace(1520, 1570, 1024) * 1e-9
    f = 3e8 / wav
    c = coupler_ring()
    # Evaluate the model's scattering parameters over the sweep.
    s = c.s_parameters(freq=f)
    # Plot |S|^2 for column 1 (presumably one through/cross port pair -
    # TODO confirm the port ordering against the model's pin list).
    plt.plot(wav, np.abs(s[:, 1] ** 2))
    print(c.pins)
    plt.show()
| [
"j"
] | j |
6cc4227ceafde33b34a254323ad23ffc0142a679 | 7f02a1297660601d40d5781cb7adbc2f4520029f | /macode/vae/train/atari_all.py | b0cedeff19ad6f234acaadddeec618235324a222 | [] | no_license | llach/ma-code | 2c4fb20ae1df9d457ec6736d3725104f37203824 | b8e6c279f966e6b9fadfa67731d8adb970106413 | refs/heads/master | 2020-04-16T18:34:36.242778 | 2019-07-21T13:43:03 | 2019-07-21T13:43:03 | 165,826,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import tensorflow as tf
from forkan.models import VAE
from forkan.datasets import load_atari_normalized
learning_rate = 1e-4
beta = 5.5
latents = 20
for name in ['pong', 'breakout', 'boxing', 'gopher', 'upndown']:
data = load_atari_normalized(name)
v = VAE(data.shape[1:], name=name, lr=learning_rate, beta=beta, latent_dim=latents)
v.train(data, num_episodes=50, print_freq=-1)
tf.reset_default_graph()
del data
del v
| [
"llach@techfak.uni-bielefeld.de"
] | llach@techfak.uni-bielefeld.de |
6955829c9dcbcfbf6e414fa698bc84b446ebf450 | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/packt/Mastering.Python.Scientific.Computing/Chapter 8/B02092_08_14.py | 16ebc5170e994c476513f720fd77f6f03af65761 | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/env python
import sys
# Hadoop-streaming mapper (Python 2): for every WARC header line of the form
# "WARC-Target-URI: <url>" emit "<scheme>//<host>\t1" so the reducer can
# count records per site.
for line in sys.stdin:
    try:
        line = line.strip()
        # split the line into words
        words = line.split()
        # increase counters
        if words[0] == "WARC-Target-URI:" :
            uri = words[1].split("/")
            print '%s\t%s' % (uri[0]+"//"+uri[2], 1)
    except Exception:
        # Best-effort: malformed or empty lines are silently skipped.  The
        # bare string literal is a no-op (an unconventional stand-in for pass).
        ""
#hadoop jar /usr/local/apache/hadoop2/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -file /mapper.py -mapper /mapper.py -file /reducer.py -reducer /reducer.py -input /text.txt -output /output
| [
"xenron@outlook.com"
] | xenron@outlook.com |
3767bcf0f0bfe6b74fb52776c215a52ebbceecfe | 7506c49859870af9e62c3e919857ffcdf2e9a19e | /book2/Seq2SeqLearning/statistic_word5.py | 66fde97061d026ee8ff261cf27241b565937ef27 | [] | no_license | Git2191866109/BookStudy | d363717285a5e9767e582f6efd1258680fa26f80 | f172244218871372ca94286c3db64cf334627ef3 | refs/heads/master | 2022-11-08T00:15:00.963332 | 2020-06-28T10:28:33 | 2020-06-28T10:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,744 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: statistic_word5.py
@time: 2019/4/29 10:39
@desc: 用tf.while_loop来实现解码过程
"""
import tensorflow as tf
import codecs
# Path of the checkpoint to restore (saved by the training program).
CHECKPOINT_PATH = "./new_seq2seq_ckpt"
# Model hyper-parameters. These must match the values used at training time.
# Size of the LSTM hidden layers.
HIDDEN_SIZE = 1024
# Number of stacked LSTM layers in the deep RNN.
NUM_LAYERS = 2
# Source-language vocabulary size.
SRC_VOCAB_SIZE = 10000
# Target-language vocabulary size.
TRG_VOCAB_SIZE = 4000
# Share parameters between the softmax layer and the target embedding layer.
SHARE_EMB_AND_SOFTMAX = True
# IDs of <sos> and <eos> in the vocabulary. Decoding starts from <sos> and
# stops once <eos> is produced, so both IDs must be known here.
SOS_ID = 1
EOS_ID = 2
# Vocabulary files.
SRC_VOCAB = "en.vocab"
TRG_VOCAB = "zh.vocab"
# NMTModel describes the seq2seq translation model.
class NMTModel(object):
    """Seq2seq NMT model: variable definitions plus a greedy decoding graph."""
    def __init__(self):
        # Identical to the training-time __init__: the NMTModel class and its
        # __init__ are shared between the training and decoding programs so
        # that the variables defined here match the ones in the checkpoint.
        # LSTM stacks used by the encoder and the decoder.
        self.enc_cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
        self.dec_cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
        # Word embeddings for the source and target languages.
        self.src_embedding = tf.get_variable("src_emb", [SRC_VOCAB_SIZE, HIDDEN_SIZE])
        self.trg_embedding = tf.get_variable("trg_emb", [TRG_VOCAB_SIZE, HIDDEN_SIZE])
        # Softmax layer variables; optionally tied to the target embedding.
        if SHARE_EMB_AND_SOFTMAX:
            self.softmax_weight = tf.transpose(self.trg_embedding)
        else:
            self.softmax_weight = tf.get_variable("weight", [HIDDEN_SIZE, TRG_VOCAB_SIZE])
        self.softmax_bias = tf.get_variable("softmax_bias", [TRG_VOCAB_SIZE])
    def inference(self, src_input):
        """Greedy-decode one source sentence (a list of word IDs).

        Returns a 1-D int32 tensor of target word IDs, starting with <sos>.
        """
        # Although there is only one sentence, dynamic_rnn requires batched
        # input, so wrap it as a batch of size 1.
        src_size = tf.convert_to_tensor([len(src_input)], dtype=tf.int32)
        src_input = tf.convert_to_tensor([src_input], dtype=tf.int32)
        src_emb = tf.nn.embedding_lookup(self.src_embedding, src_input)
        # Build the encoder with dynamic_rnn, same as at training time.
        with tf.variable_scope("encoder"):
            enc_outputs, enc_state = tf.nn.dynamic_rnn(self.enc_cell, src_emb, src_size, dtype=tf.float32)
        # Cap the number of decoding steps to avoid an infinite loop in
        # pathological cases.
        MAX_DEC_LEN = 100
        with tf.variable_scope("decoder/rnn/multi_rnn_cell"):
            # A growable TensorArray holds the generated sentence.
            init_array = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False)
            # Feed <sos> as the decoder's first input.
            init_array = init_array.write(0, SOS_ID)
            # Initial loop state: RNN hidden state, the output TensorArray,
            # and an integer step counter.
            init_loop_var = (enc_state, init_array, 0)
            # Loop condition for tf.while_loop: continue until the decoder
            # emits <eos> or the step limit is reached.
            def continue_loop_condition(state, trg_ids, step):
                return tf.reduce_all(tf.logical_and(tf.not_equal(trg_ids.read(step), EOS_ID), tf.less(step, MAX_DEC_LEN-1)))
            def loop_body(state, trg_ids, step):
                # Read the word produced at the previous step and embed it.
                trg_input = [trg_ids.read(step)]
                trg_emb = tf.nn.embedding_lookup(self.trg_embedding, trg_input)
                # Advance the decoder cell one step directly (no dynamic_rnn).
                dec_outputs, next_state = self.dec_cell.call(state=state, inputs=trg_emb)
                # Compute logits over the vocabulary and pick the argmax word.
                output = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE])
                logits = (tf.matmul(output, self.softmax_weight) + self.softmax_bias)
                next_id = tf.argmax(logits, axis=1, output_type=tf.int32)
                # Append this step's word to the loop state.
                trg_ids = trg_ids.write(step+1, next_id[0])
                return next_state, trg_ids, step+1
            # Run tf.while_loop and return the accumulated word IDs.
            state, trg_ids, step = tf.while_loop(continue_loop_condition, loop_body, init_loop_var)
            return trg_ids.stack()
def main():
    """Restore the trained model and translate one hard-coded test sentence."""
    # Define the model graph (same variable scope as during training).
    with tf.variable_scope("nmt_model", reuse=None):
        model = NMTModel()
    # The English sentence to translate.
    test_sentence = "This is a test ."
    print(test_sentence)
    # Convert the sentence to word IDs via the English vocabulary, with an
    # <eos> marker appended.
    test_sentence = test_sentence + " <eos>"
    with codecs.open(SRC_VOCAB, 'r', 'utf-8') as vocab:
        src_vocab = [w.strip() for w in vocab.readlines()]
    # Map each word to its ID (unknown words fall back to <unk>).
    src_id_dict = dict((src_vocab[x], x) for x in range(SRC_VOCAB_SIZE))
    test_en_ids = [(src_id_dict[en_text] if en_text in src_id_dict else src_id_dict['<unk>'])
                   for en_text in test_sentence.split()]
    print(test_en_ids)
    # Build the decoding graph for this input.
    output_op = model.inference(test_en_ids)
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, CHECKPOINT_PATH)
    # Run the graph to obtain the translated word IDs.
    output_ids = sess.run(output_op)
    print(output_ids)
    # Convert the IDs back to text via the Chinese vocabulary, dropping the
    # leading <sos> and trailing <eos>.
    with codecs.open(TRG_VOCAB, "r", "utf-8") as f_vocab:
        trg_vocab = [w.strip() for w in f_vocab.readlines()]
    output_text = ''.join([trg_vocab[x] for x in output_ids[1:-1]])
    # Print the translation.
    print(output_text)
    sess.close()
if __name__ == "__main__":
main() | [
"694317828@qq.com"
] | 694317828@qq.com |
8fed339d0d009e1232013c23f8458a9a76188cf0 | 031b24455b953907a0f98778931ee8a03c3c4b6c | /pacman103/core/spinnman/spinnman_utilities.py | 7a49d1ec7d9bba135a19329c236b5ed8deeb1056 | [] | no_license | BRML/HBP-spinnaker-cerebellum | 7e5f69c05d0e51f79442635df58815768f20e6bc | 7fc3eb5c486df66720d227e0e422cbab65c08885 | refs/heads/master | 2020-12-25T23:47:09.416213 | 2015-06-26T09:45:31 | 2015-06-26T09:45:31 | 38,686,607 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | __author__ = 'stokesa6'
import os
class SpinnmanUtilities(object):
    """Records transceiver commands to a report file and can parse one back.

    When constructed with a ``dao``, command lines are buffered in
    ``output_data`` and written to a "transceiver_commands" report file by
    ``close()``.  When constructed with an ``input_file``, a previously
    written report is parsed back into ``runtime``, ``total_processors``,
    ``app_loads``, ``mem_writes`` and ``mem_writes_from_file``.
    """
    def __init__(self, dao=None, input_file=None):
        # Parsed-back state (populated by read_in_file).
        self.runtime = None
        self.total_processors = None
        self.app_loads = list()
        self.mem_writes_from_file = list()
        self.mem_writes = list()
        if dao is not None:
            directory = dao.get_reports_directory("transceiver_commands")
            self.output_file = os.path.join(directory, "transceiver_commands")
            self.output = open(self.output_file, "wb")
        # Buffered command lines, flushed to disk by close().
        self.output_data = list()
        if input_file is not None:
            self.read_in_file(input_file)
    def write_extra_data(self, runtime, total_processors):
        # Prepend so that RUNTIME ends up on line 1 and TOTAL_PROCESSORS on
        # line 2 - the exact order read_in_file expects.
        self.output_data.insert(0, "TOTAL_PROCESSORS:{}:".format(total_processors))
        self.output_data.insert(0, "RUNTIME:{}:".format(runtime))
    # different types of writes
    def write_app_load_command(self, key, region, core_part_of_region, app_id):
        # Record an application-load command.
        self.output_data.append("APPLOAD:{}:{}:{}:{}:".format(key, region,
                                                              core_part_of_region,
                                                              app_id))
    def write_selects(self, x, y, p):
        # Record a chip/core selection (x, y, processor).
        self.output_data.append("SELECT:{}:{}:{}:".format(x, y, p))
    def write_mem_from_file(self, address, type_word, filename):
        # Record a memory write whose payload is read from a file.
        self.output_data.append("WRITE_MEM_FROM_FILE:{}:{}:{}:".
                                format(address, int(type_word), filename))
    def write_mem(self, address, type_word, structure):
        # Record a memory write with an inline data structure.
        self.output_data.append("WRITE_MEM:{}:{}:{}:".
                                format(address, int(type_word), structure))
    def close(self):
        # Flush the buffered command lines to the report file.
        # NOTE(review): assumes the instance was constructed with a dao;
        # otherwise self.output was never created and this raises - confirm
        # callers never close() a read-only instance.
        for line in self.output_data:
            self.output.write(line + "\n")
        self.output.flush()
        self.output.close()
    def get_run_time(self):
        # Runtime value parsed from the report header.
        return self.runtime
    def get_total_processors(self):
        # Processor count parsed from the report header.
        return self.total_processors
    def get_app_loads(self):
        # Parsed APPLOAD command dicts.
        return self.app_loads
    def get_mem_writes(self):
        # Parsed WRITE_MEM command dicts (interleaved with SELECT dicts).
        return self.mem_writes
    def get_mem_writes_from_file(self):
        # Parsed WRITE_MEM_FROM_FILE command dicts (interleaved with SELECTs).
        return self.mem_writes_from_file
    def read_in_file(self, input_file):
        """Parse a report previously written by this class back into memory.

        NOTE(review): the file handle is never closed here.
        """
        inputfile = open(input_file, "r")
        content = inputfile.readlines()
        # Header: line 0 is "RUNTIME:<v>:", line 1 is "TOTAL_PROCESSORS:<v>:".
        self.runtime = content[0].split(":")[1]
        self.total_processors = content[1].split(":")[1]
        self.app_loads = list()
        data = None
        line = 0
        for line in range(2, len(content)):
            bits = content[line].split(":")
            if bits[0] == "APPLOAD":
                data = dict()
                data['key'] = bits[1]
                data['region'] = bits[2]
                data['core_part_of_region'] = bits[3]
                data['app_id'] = bits[4]
                self.app_loads.append(data)
            elif bits[0] == "SELECT":
                # The SELECT dict is not stored yet; it is appended by the
                # following WRITE_MEM* branch, presumably to pair each write
                # with its target selection - TODO confirm this pairing is
                # intentional (the same dict can be appended more than once).
                data = dict()
                data['x'] = bits[1]
                data['y'] = bits[2]
                data['p'] = bits[3]
            elif bits[0] == "WRITE_MEM":
                # First append the preceding dict (the SELECT), then the write.
                self.mem_writes.append(data)
                data = dict()
                data['address'] = bits[1]
                data['type_word'] = bits[2]
                data['structure'] = bits[3]
                self.mem_writes.append(data)
            elif bits[0] == "WRITE_MEM_FROM_FILE":
                # Same pairing scheme as WRITE_MEM, for file-backed writes.
                self.mem_writes_from_file.append(data)
                data = dict()
                data['address'] = bits[1]
                data['type_word'] = bits[2]
                data['filename'] = bits[3]
                self.mem_writes_from_file.append(data)
| [
"dr.christoph.richter@gmail.com"
] | dr.christoph.richter@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.