Dataset columns (dtype and observed value range; nullable columns marked):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 288)
- content_id: string (length 40)
- detected_licenses: list (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (684 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (147 classes)
- src_encoding: string (25 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (128 to 12.7k)
- extension: string (142 classes)
- content: string (length 128 to 8.19k)
- authors: list (length 1)
- author_id: string (length 1 to 132)
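The records below are pipe-delimited rows in this schema, with each file's `content` inlined. As a minimal sketch (assuming the rows have been exported to a local Parquet file; the file name and filter thresholds here are hypothetical), they can be filtered like this:

```python
import pandas as pd

# Hypothetical local export of the rows shown below; adjust the path to your copy.
df = pd.read_parquet("python_code_sample.parquet")

# Keep permissively licensed, non-vendored, non-generated files under 2 KB.
small_permissive = df[
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
    & (df["length_bytes"] < 2048)
]
print(small_permissive[["repo_name", "path", "length_bytes"]].head())
```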
f14d86124a58ed2a0ba3b2173fa644ccf6aed101
|
ba45840f241a0348d8f11df4bf5e16dee230bda5
|
/config/config.py
|
88fe4b900e6b8435fdccd2cd0ced9374f2fd1578
|
[] |
no_license
|
zhengxiawu/FGIR-GAN
|
2d630b4330da535e157f13561160789f2d1183c4
|
2a4e94c1c736d2b20255eda34b801e9fc2be62af
|
refs/heads/master
| 2020-03-07T02:57:37.518206
| 2018-04-04T01:42:32
| 2018-04-04T01:42:32
| 127,222,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
# --------------------------------------------------------
# MXNet Implementation of pix2pix GAN
# Copyright (c) 2017 UIUC
# Modified by Bowen Cheng
# --------------------------------------------------------
import yaml
import numpy as np
from easydict import EasyDict as edict
config = edict()
config.MXNET_VERSION = ''
config.output_path = ''
config.symbol = ''
config.gpus = ''
config.RNG_SEED = 1
config.loadSize = 286
config.fineSize = 256
config.AtoB = False
config.netG = 'autoencoder' # 'autoencoder' or 'unet'
config.netD = 'basic' # 'basic' or 'n_layers'
config.n_layers = 0 # only used if netD=='n_layers'
config.GAN_loss = 1 # set to 1 to use the GAN loss, 0 to disable it
config.Trained_model_loss = 1
# default training
config.default = edict()
config.default.frequent = 20
config.default.kvstore = 'device'
# dataset related params
config.dataset = edict()
config.dataset.dataset = 'facades'
config.dataset.root = './data'
config.dataset.imageset = 'train'
config.dataset.image_root = './datasets'
config.dataset.testset = 'val'
config.dataset.mean_r = 104
config.dataset.mean_g = 117
config.dataset.mean_b = 123
config.TRAIN = edict()
config.TRAIN.optimizer = 'adam'
config.TRAIN.lr = 0.0002
config.TRAIN.beta1 = 0.5
config.TRAIN.beta2 = 0.999
config.TRAIN.momentum = 0.9
config.TRAIN.wd = 0.0005
config.TRAIN.begin_epoch = 0
config.TRAIN.end_epoch = 200
config.TRAIN.num_batches = 1000
config.TRAIN.model_prefix = ''
config.TRAIN.step_epoch = 100
config.TRAIN.decay_epoch = 100
# whether resume training
config.TRAIN.RESUME = False
# whether shuffle image
config.TRAIN.SHUFFLE = True
config.TRAIN.FLIP = True
# batch size
config.TRAIN.BATCH_SIZE = 1
config.TRAIN.epoch_end_plot_figure = True
config.TRAIN.batch_end_plot_figure = False
config.TRAIN.save_interval = 20
# L1 loss weight
config.TRAIN.lambda_l1 = 100
config.TEST = edict()
config.TEST.TEST_EPOCH = 0
config.TEST.img_h = 256
config.TEST.img_w = 256
def update_config(config_file):
exp_config = None
with open(config_file) as f:
exp_config = edict(yaml.safe_load(f))  # safe_load: newer PyYAML requires an explicit Loader for yaml.load
for k, v in exp_config.items():
if k in config:
if isinstance(v, dict):
for vk, vv in v.items():
config[k][vk] = vv
else:
config[k] = v
else:
raise ValueError("key must exist in config.py")
|
[
"zhengxiawu@126.com"
] |
zhengxiawu@126.com
|
bfe93474345ec70c961dbdc527b854fb60902af2
|
084a13b6524e21914826e842eeefefd09570a970
|
/experiments/atari_hard/montezuma_revenge/ppo_cnd_110_2.py
|
3737607040f72aec5ff14669c39f29ff72e48a5b
|
[
"MIT"
] |
permissive
|
michalnand/reinforcement_learning
|
28aa0e2c92b6112cf366eff0e0d6a78b9a56e94f
|
01635014a37a4c871766b4cdd2caaa26a0c2d8cc
|
refs/heads/main
| 2023-06-01T10:27:36.601631
| 2023-02-12T19:46:01
| 2023-02-12T19:46:01
| 217,841,101
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import time
import torch
import RLAgents
import models.ppo_cnd_110_2.src.model_ppo as ModelPPO
import models.ppo_cnd_110_2.src.model_cnd_target as ModelCNDTarget
import models.ppo_cnd_110_2.src.model_cnd as ModelCND
import models.ppo_cnd_110_2.src.config as Config
#torch.cuda.set_device("cuda:0")
#print("running on ", torch.cuda.get_device_name())
path = "models/ppo_cnd_110_2/"
config = Config.Config()
#config.envs_count = 1
envs = RLAgents.MultiEnvParallelOptimised("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezuma, config.envs_count)
#envs = RLAgents.MultiEnvSeq("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezuma, config.envs_count, True)
#envs = RLAgents.MultiEnvSeq("MontezumaRevengeNoFrameskip-v4", RLAgents.WrapperMontezumaVideo, config.envs_count)
agent = RLAgents.AgentPPOCND(envs, ModelPPO, ModelCNDTarget, ModelCND, config)
max_iterations = 1000000
training = RLAgents.TrainingIterations(envs, agent, max_iterations, path, 128)
training.run()
'''
agent.load(path)
agent.disable_training()
while True:
reward, done, _ = agent.main()
'''
|
[
"michal.nand@gmail.com"
] |
michal.nand@gmail.com
|
2cbe46e844394113c5a7b0db976db90e3d92a72b
|
d12b59b33df5c467abf081d48e043dac70cc5a9c
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/capabilities_e34fedc02893b4ebddb7e5f9d242efcc.py
|
4d5b4d23c25816f353599659ba9dc980cfeb59ec
|
[
"MIT"
] |
permissive
|
ajbalogh/ixnetwork_restpy
|
59ce20b88c1f99f95a980ff01106bda8f4ad5a0f
|
60a107e84fd8c1a32e24500259738e11740069fd
|
refs/heads/master
| 2023-04-02T22:01:51.088515
| 2021-04-09T18:39:28
| 2021-04-09T18:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,008
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Capabilities(Base):
"""A high level object that allows to define the OpenFlow Switch capabilities configuration.
The Capabilities class encapsulates a required capabilities resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'capabilities'
_SDM_ATT_MAP = {
'FlowStatistics': 'flowStatistics',
'GroupStatistics': 'groupStatistics',
'MatchIpAddressInArpPackets': 'matchIpAddressInArpPackets',
'PortStatistics': 'portStatistics',
'QueueStatistics': 'queueStatistics',
'ReassambleIpFragments': 'reassambleIpFragments',
'Reserved': 'reserved',
'SpanningTree': 'spanningTree',
'SwitchWillBlockLoopingPorts': 'switchWillBlockLoopingPorts',
'TableStatistics': 'tableStatistics',
}
def __init__(self, parent):
super(Capabilities, self).__init__(parent)
@property
def FlowStatistics(self):
"""
Returns
-------
- bool: Indicates that the ofChannel capabilities of the switch includes flow statistics.
"""
return self._get_attribute(self._SDM_ATT_MAP['FlowStatistics'])
@FlowStatistics.setter
def FlowStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['FlowStatistics'], value)
@property
def GroupStatistics(self):
"""
Returns
-------
- bool: If true, indicates that the capabilities of the switch include Group Statistics.
"""
return self._get_attribute(self._SDM_ATT_MAP['GroupStatistics'])
@GroupStatistics.setter
def GroupStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['GroupStatistics'], value)
@property
def MatchIpAddressInArpPackets(self):
"""
Returns
-------
- bool: If true, indicates that the capabilities of the switch includes Match IP addresses in ARP pkts.
"""
return self._get_attribute(self._SDM_ATT_MAP['MatchIpAddressInArpPackets'])
@MatchIpAddressInArpPackets.setter
def MatchIpAddressInArpPackets(self, value):
self._set_attribute(self._SDM_ATT_MAP['MatchIpAddressInArpPackets'], value)
@property
def PortStatistics(self):
"""
Returns
-------
- bool: Indicates that the ofChannel capabilities of the switch includes port statistics.
"""
return self._get_attribute(self._SDM_ATT_MAP['PortStatistics'])
@PortStatistics.setter
def PortStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['PortStatistics'], value)
@property
def QueueStatistics(self):
"""
Returns
-------
- bool: Indicates that the capabilities of the switch include Queue statistics.
"""
return self._get_attribute(self._SDM_ATT_MAP['QueueStatistics'])
@QueueStatistics.setter
def QueueStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['QueueStatistics'], value)
@property
def ReassambleIpFragments(self):
"""
Returns
-------
- bool: Indicates that the capabilities of the switch include reassemble IP fragments at the receiver.
"""
return self._get_attribute(self._SDM_ATT_MAP['ReassambleIpFragments'])
@ReassambleIpFragments.setter
def ReassambleIpFragments(self, value):
self._set_attribute(self._SDM_ATT_MAP['ReassambleIpFragments'], value)
@property
def Reserved(self):
"""
Returns
-------
- bool: Indicates that the capabilities of the switch includes reserved, must be zero.
"""
return self._get_attribute(self._SDM_ATT_MAP['Reserved'])
@Reserved.setter
def Reserved(self, value):
self._set_attribute(self._SDM_ATT_MAP['Reserved'], value)
@property
def SpanningTree(self):
"""
Returns
-------
- bool: Indicates that the capabilities of the switch includes 802.1d spanning tree.
"""
return self._get_attribute(self._SDM_ATT_MAP['SpanningTree'])
@SpanningTree.setter
def SpanningTree(self, value):
self._set_attribute(self._SDM_ATT_MAP['SpanningTree'], value)
@property
def SwitchWillBlockLoopingPorts(self):
"""
Returns
-------
- bool: If true, indicates that switch will block looping ports.
"""
return self._get_attribute(self._SDM_ATT_MAP['SwitchWillBlockLoopingPorts'])
@SwitchWillBlockLoopingPorts.setter
def SwitchWillBlockLoopingPorts(self, value):
self._set_attribute(self._SDM_ATT_MAP['SwitchWillBlockLoopingPorts'], value)
@property
def TableStatistics(self):
"""
Returns
-------
- bool: Indicates that the capabilities of the switch includes table statistics.
"""
return self._get_attribute(self._SDM_ATT_MAP['TableStatistics'])
@TableStatistics.setter
def TableStatistics(self, value):
self._set_attribute(self._SDM_ATT_MAP['TableStatistics'], value)
def update(self, FlowStatistics=None, GroupStatistics=None, MatchIpAddressInArpPackets=None, PortStatistics=None, QueueStatistics=None, ReassambleIpFragments=None, Reserved=None, SpanningTree=None, SwitchWillBlockLoopingPorts=None, TableStatistics=None):
"""Updates capabilities resource on the server.
Args
----
- FlowStatistics (bool): Indicates that the ofChannel capabilities of the switch includes flow statistics.
- GroupStatistics (bool): If true, indicates that the capabilities of the switch include Group Statistics.
- MatchIpAddressInArpPackets (bool): If true, indicates that the capabilities of the switch includes Match IP addresses in ARP pkts.
- PortStatistics (bool): Indicates that the ofChannel capabilities of the switch includes port statistics.
- QueueStatistics (bool): Indicates that the capabilities of the switch include Queue statistics.
- ReassambleIpFragments (bool): Indicates that the capabilities of the switch include reassemble IP fragments at the receiver.
- Reserved (bool): Indicates that the capabilities of the switch includes reserved, must be zero.
- SpanningTree (bool): Indicates that the capabilities of the switch includes 802.1d spanning tree.
- SwitchWillBlockLoopingPorts (bool): If true, indicates that switch will block looping ports.
- TableStatistics (bool): Indicates that the capabilities of the switch includes table statistics.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
a82b1bbd2b03de5d576c5401152707034fefb3a8
|
d5214b1331c9dae59d95ba5b3aa3e9f449ad6695
|
/qPloneResolveUID/tags/0.2.4/transforms/ruid_to_url.py
|
b243d8358f79baaeef8a74505a0c1b12a115c326
|
[] |
no_license
|
kroman0/products
|
1661ee25a224c4b5f172f98110944f56136c77cf
|
f359bb64db22f468db5d1e411638790e94d535a2
|
refs/heads/master
| 2021-01-10T07:58:04.579234
| 2014-06-11T12:05:56
| 2014-06-11T12:05:56
| 52,677,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
# Author: Melnychuk Taras
# Contact: fenix@quintagroup.com
# Date: $Date: 2006-08-11
# Copyright: quintagroup.com
import re
from Products.CMFCore.utils import getToolByName
from Products.PortalTransforms.interfaces import itransform
from Products.qPloneResolveUID.config import *
class ruid_to_url:
"""Transform which replaces resolve uid into urls"""
__implements__ = itransform
__name__ = "ruid_to_url"
inputs = ('text/html',)
output = 'text/html'
def __init__(self, name=None):
if name:
self.__name__ = name
self.tag_regexp = re.compile(TAG_PATTERN ,re.I|re.S)
self.ruid_regexp = re.compile(UID_PATTERN ,re.I|re.S)
def name(self):
return self.__name__
def find_ruid(self, data):
tags_ruid = []
unique_ruid = []
for m in self.tag_regexp.finditer(data):
ruid = re.search(self.ruid_regexp, m.group(0))
if ruid:
tags_ruid.append({m.group(0):ruid.group('uid')})
[unique_ruid.append(tu.values()[0]) for tu in tags_ruid if tu.values()[0] not in unique_ruid]
return tags_ruid, unique_ruid
def mapRUID_URL(self, unique_ruid, portal):
ruid_url = {}
rc = getToolByName(portal, 'reference_catalog')
pu = getToolByName(portal, 'portal_url')
for uid in unique_ruid:
obj = rc.lookupObject(uid)
if obj:
ruid_url[uid] = pu.getRelativeUrl(obj)
return ruid_url
def convert(self, orig, data, **kwargs):
text = orig
tags_ruid, unique_ruid = self.find_ruid(text)
if unique_ruid:
ruid_url = self.mapRUID_URL(unique_ruid, kwargs['context'])
for tag_ruid in tags_ruid:
t, uid = tag_ruid.items()[0]
if ruid_url.has_key(uid):
text = text.replace(t, t.replace('resolveuid/'+uid, ruid_url[uid]))
data.setData(text)
return data
def register():
return ruid_to_url()
|
[
"mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946"
] |
mylan@4df3d6c7-0a05-0410-9bee-ae8b7a76f946
|
51ac6b863cba692cbdc9780978fb8f213b7e3c57
|
3b225bf4895df8b5c02d82b94574ed7985b2c69f
|
/test_settings.py
|
44a7acc2fb39588561c79457caebcf6ed97e4b11
|
[] |
no_license
|
yakky/aldryn-faq-1
|
10d8d825447a1ba8d62712fbabe988d3d8203a94
|
3749ad2568432d3e78c0d37627b1bff9f52b69b9
|
refs/heads/master
| 2021-01-15T14:58:46.492723
| 2015-01-16T10:08:44
| 2015-01-16T10:08:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# -*- coding: utf-8 -*-
HELPER_SETTINGS = {
'ROOT_URLCONF': 'aldryn_faq.tests.urls',
'TIME_ZONE': 'Europe/Zurich',
'LANGUAGES': (
('en', 'English'),
('de', 'German'),
('fr', 'French'),
),
'INSTALLED_APPS': [
'adminsortable',
'aldryn_faq',
'djangocms_text_ckeditor',
'hvad',
'sortedm2m',
],
}
def run():
from djangocms_helper import runner
runner.cms('aldryn_faq')
if __name__ == "__main__":
run()
|
[
"mkoistinen@gmail.com"
] |
mkoistinen@gmail.com
|
2965f5264cd0016485601e825d6789dcf00187f9
|
6710c52d04e17facbc9fb35a7df313f7a2a7bd53
|
/0319. Bulb Switcher.py
|
817afd635d32758fc9c7054706a3bdac532686ba
|
[] |
no_license
|
pwang867/LeetCode-Solutions-Python
|
535088fbe747a453360457728cc22cf336020bd2
|
188befbfb7080ba1053ee1f7187b177b64cf42d2
|
refs/heads/master
| 2022-11-13T16:20:28.211707
| 2020-06-28T06:01:14
| 2020-06-28T06:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# this question is basically asking for the count of divisors of a number
# only lightbulbs at positions m^2 stay on, because divisors always
# appear in pairs except for m^2, so only m^2 has an odd number of divisors
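# Illustrative worked example (added for clarity, not part of the original solution):
# 12 has the divisor pairs (1,12), (2,6), (3,4), so bulb 12 is toggled 6 times and ends off;
# 9 has the pair (1,9) plus the unpaired divisor 3, so it is toggled 3 times and ends on.
# The perfect squares up to 10 are 1, 4, 9, hence bulbSwitch(10) == 3 == int(pow(10, 0.5)).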
class Solution(object):
def bulbSwitch(self, n):
"""
:type n: int
:rtype: int
"""
return int(pow(n, 0.5))
"""
There are n bulbs that are initially off. You first turn on all the bulbs. Then, you turn off every second bulb. On the third round, you toggle every third bulb (turning on if it's off or turning off if it's on). For the i-th round, you toggle every i-th bulb. For the n-th round, you only toggle the last bulb. Find how many bulbs are on after n rounds.
Example:
Input: 3
Output: 1
Explanation:
At first, the three bulbs are [off, off, off].
After first round, the three bulbs are [on, on, on].
After second round, the three bulbs are [on, off, on].
After third round, the three bulbs are [on, off, off].
So you should return 1, because only one bulb is on.
"""
|
[
"wzhou007@ucr.edu"
] |
wzhou007@ucr.edu
|
dc73268b30ed69da8d008cedd34a1a6303b4a3b6
|
67bc22c4bb1388994e3c983c9be0d85b6cc80cb2
|
/charmcraft/commands/store/__init__.py
|
c4087cdc5fcf28020a0af3b83ebb7bdcc3759ee8
|
[
"Apache-2.0"
] |
permissive
|
msgpo/charmcraft
|
c35bb3eaf98a9d3e832211d185404256743acd31
|
89adbcb3c059da7c023f789ba8c978494f1d17f9
|
refs/heads/master
| 2022-11-15T08:39:35.942078
| 2020-07-14T14:20:41
| 2020-07-14T14:20:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,483
|
py
|
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
"""Commands related to the Store, a thin layer above real functionality."""
import logging
import os
import pathlib
import yaml
from tabulate import tabulate
from charmcraft.cmdbase import BaseCommand, CommandError
from .store import Store
logger = logging.getLogger('charmcraft.commands.store')
class LoginCommand(BaseCommand):
"""Log into the store."""
name = 'login'
help_msg = "login to Ubuntu Single Sign On"
def run(self, parsed_args):
"""Run the command."""
store = Store()
store.login()
logger.info("Login successful")
class LogoutCommand(BaseCommand):
"""Clear store-related credentials."""
name = 'logout'
help_msg = "clear session credentials"
def run(self, parsed_args):
"""Run the command."""
store = Store()
store.logout()
logger.info("Credentials cleared")
class WhoamiCommand(BaseCommand):
"""Show login information."""
name = 'whoami'
help_msg = "returns your login information relevant to the Store"
def run(self, parsed_args):
"""Run the command."""
store = Store()
result = store.whoami()
data = [
('name:', result.name),
('username:', result.username),
('id:', result.userid),
]
table = tabulate(data, tablefmt='plain')
for line in table.splitlines():
logger.info(line)
class RegisterNameCommand(BaseCommand):
"""Register a name in the Store."""
name = 'register'
help_msg = "register a name in the Store"
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument('name', help="the name to register in the Store")
def run(self, parsed_args):
"""Run the command."""
store = Store()
store.register_name(parsed_args.name)
logger.info("Congrats! You are now the publisher of %r", parsed_args.name)
class ListRegisteredCommand(BaseCommand):
"""List the charms registered in the Store."""
name = 'list'
help_msg = "list the charms registered the Store"
def run(self, parsed_args):
"""Run the command."""
store = Store()
result = store.list_registered_names()
if not result:
logger.info("Nothing found")
return
headers = ['Name', 'Visibility', 'Status']
data = []
for item in result:
visibility = 'private' if item.private else 'public'
data.append([
item.name,
visibility,
item.status,
])
table = tabulate(data, headers=headers, tablefmt='plain')
for line in table.splitlines():
logger.info(line)
class UploadCommand(BaseCommand):
"""Upload a charm file to the Store."""
name = 'upload'
help_msg = "upload a charm file to the Store"
def _discover_charm(self, charm_filepath):
"""Discover the charm name and file path.
If received path is None, a metadata.yaml will be searched in the current directory. If
path is given the name is taken from the filename.
"""
if charm_filepath is None:
# discover the info using project's metadata, assume the file has the project's name
# with a .charm extension
try:
with open('metadata.yaml', 'rb') as fh:
metadata = yaml.safe_load(fh)
charm_name = metadata['name']
except (yaml.error.YAMLError, OSError, KeyError):
raise CommandError(
"Can't access name in 'metadata.yaml' file. The 'upload' command needs to be "
"executed in a valid project's directory, or point to a charm file with "
"the --charm-file option.")
charm_filepath = pathlib.Path(charm_name + '.charm').absolute()
if not os.access(str(charm_filepath), os.R_OK): # access doesn't support pathlib in 3.5
raise CommandError(
"Can't access charm file {!r}. You can indicate a charm file with "
"the --charm-file option.".format(str(charm_filepath)))
else:
# the path is given, assume the charm name is part of the file name
# XXX Facundo 2020-06-30: Actually, we need to open the ZIP file, extract the
# included metadata.yaml file, and read the name from there. Issue: #77.
charm_filepath = charm_filepath.expanduser()
if not os.access(str(charm_filepath), os.R_OK): # access doesn't support pathlib in 3.5
raise CommandError(
"Can't access the indicated charm file: {!r}".format(str(charm_filepath)))
if not charm_filepath.is_file():
raise CommandError(
"The indicated charm is not a file: {!r}".format(str(charm_filepath)))
charm_name = charm_filepath.stem
return charm_name, charm_filepath
def fill_parser(self, parser):
"""Add own parameters to the general parser."""
parser.add_argument(
'--charm-file', type=pathlib.Path,
help="the path to the charm file to be uploaded")
def run(self, parsed_args):
"""Run the command."""
name, path = self._discover_charm(parsed_args.charm_file)
store = Store()
result = store.upload(name, path)
if result.ok:
logger.info("Revision %s of %r created", result.revision, str(name))
else:
# XXX Facundo 2020-06-30: at some point in the future the Store will give us also a
# reason why it failed, to improve the message. Issue: #78.
logger.info("Upload failed: got status %r", result.status)
|
[
"facundo@taniquetil.com.ar"
] |
facundo@taniquetil.com.ar
|
a62693bd536b3eb67490873a1f580a9c1efd2bcd
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/85.py
|
8cf79e0f137aeaf913cd1a9586a7cbec25e4ea0e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
#!/usr/bin/python
import os
import sys
fin = sys.stdin
def normalize(x):
s = str(x)
smallest = x
for i in xrange(1, len(s)):
n = int(s[i:] + s[:i])
if n < smallest:
smallest = n
return smallest
def main():
T = int(fin.readline())
for t in xrange(1, T + 1):
A, B = map(int, fin.readline().split())
m = {}
for n in xrange(A, B+1):
x = normalize(n)
if x in m:
m[x] += 1
else:
m[x] = 1
count = 0
for x in m.values():
if x > 1:
count += (x * x - x) / 2
print 'Case #%d: %d' % (t, count)
if __name__ == '__main__':
if len(sys.argv) > 1:
fin = open(sys.argv[1], 'r')
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
e69357a0b01abbd4d042f0fe6c61619e1f299ebc
|
58828acea95ec3babcada95a62af385e5e924594
|
/tests/test_load.py
|
4b5b36b2c319c8fce1920031831f3aab165cb01d
|
[
"Zlib"
] |
permissive
|
akx/pyalleg
|
22eab5f0fe1291bcaf535cb8a264e3e0474d6378
|
e14eb4dcf84f2a165fb2556ae40305a279d6e4c4
|
refs/heads/master
| 2021-01-21T15:22:47.855437
| 2017-06-25T21:55:07
| 2017-06-25T21:55:07
| 95,387,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
from pyalleg import *
import time,random,math
def rnd(m): return random.randint(0,m)
init()
initGfx(0,640,480)
initKeyboard()
screen=getScreen()
page=Bitmap(640,480)
bitmap=loadBitmap("smiley.bmp")
font1=loadFont("pixelfont.pcx")
font2=loadFont("comic.pcx")
font=getFont()
t=0
sinex=640
# XColor is a class to encapsulate a color value.
# All methods return self in XColor, thus you can say
# XColor().unpack(someColor).shift1("hue",3).getColor()
# to retrieve someColor as an Allegro-compliant color
# with hue shifted by 3. That is, it's equivalent to
# someColor -> rgb triplet
# h,s,v=rgbHsv(r,g,b)
# h+=3
# r,g,b=hsvRgb(h,s,v)
# Color(r,g,b)
sinecolor=XColor(0,0,0)
sinecolor.setHsv(0,1,1)
shifts= [
("hue",1.0,1),
("sat",-0.005,1)
]
ft=time.time()
frames=0
fps=0
fpslimit=1
while not keyDown(constants.KEY_ESC):
if keyDown(constants.KEY_Q): fpslimit=1
if keyDown(constants.KEY_W): fpslimit=0
if keyDown(constants.KEY_S): page.save("test_load.bmp")
page.clear()
t+=0.1
solidMode()
for z in range(10):
sz=200+math.cos(t)*100
x=320+math.cos(t*0.5+z)*sz
y=240+math.sin(t*0.6-z)*sz
bitmap.rotateSprite(page,x-105,y-105,t*(15-z*6))
sx=sinex
sinetext="PyAlleg Sine Scroller! FPS: %d"%fps
sinecolor.shift(shifts,1)
color=sinecolor.getColor()
tx=t%20
if tx<10:
sinefont=font1
else:
sinefont=font2
for n,c in enumerate(sinetext):
xc=sx
yc=160+math.sin(t+n*0.5)*20.0
sinefont.draw(page,xc-1,yc-1,0,c)
sinefont.draw(page,xc+1,yc+1,0,c)
sinefont.draw(page,xc-1,yc+1,0,c)
sinefont.draw(page,xc+1,yc-1,0,c)
sinefont.draw(page,xc,yc,color,c)
sx+=sinefont.length(c)+1
font.draw(page,0,0,0xFFFFFF,"FPS limit[%d]: q/w | S to save image"%fpslimit)
screen.acquire()
page.blit(screen)
screen.release()
sinex-=1
if sinex<-600: sinex=640
frames+=1
if fps>30 and fpslimit:
time.sleep(0.01)
if time.time()-ft>0.2:
ft=time.time()
fps=frames*5
frames=0
|
[
"akx@iki.fi"
] |
akx@iki.fi
|
4c6724a90a253228261c56b46258222d275e4a7b
|
27923f62fa5544c84d3c54c90f325525205381bc
|
/tests/js/classifier/RandomForestClassifierTest.py
|
d9ac8eb96bf61721e55f9d9f20fb4b63f2a8c4b8
|
[
"MIT"
] |
permissive
|
JasonKessler/sklearn-porter
|
8e8eb34931e4c82289b6a08cdd29c1c73f032e1c
|
eaa094e122812d88b6f674dee9bed3ceb8b36e96
|
refs/heads/master
| 2020-05-20T18:42:48.472607
| 2017-02-08T22:00:22
| 2017-02-08T22:00:22
| 84,506,202
| 1
| 1
| null | 2017-03-10T01:34:15
| 2017-03-10T01:34:15
| null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# -*- coding: utf-8 -*-
import unittest
from sklearn.ensemble import RandomForestClassifier
from sklearn_porter import Porter
from ..JavaScriptTest import JavaScriptTest
class RandomForestClassifierTest(JavaScriptTest, unittest.TestCase):
def setUp(self):
super(RandomForestClassifierTest, self).setUp()
self.porter = Porter(language='js')
clf = RandomForestClassifier(n_estimators=100, random_state=0)
self._port_model(clf)
def tearDown(self):
super(RandomForestClassifierTest, self).tearDown()
|
[
"darius.morawiec@nok.onl"
] |
darius.morawiec@nok.onl
|
a4b35ff16815f444c21e0e23f3f4ba9385c85baa
|
13faa0d553ed6c6a57791db3dfdb2a0580a1695b
|
/CodeChef/Practice/Easy/CARVANS.py
|
dac8fc3fca89d8ccbf494609774fa48b766d384c
|
[] |
no_license
|
kautsiitd/Competitive_Programming
|
ba968a4764ba7b5f2531d03fb9c53dc1621c2d44
|
a0d8ae16646d73c346d9ce334e5b5b09bff67f67
|
refs/heads/master
| 2021-01-17T13:29:52.407558
| 2017-10-01T09:58:23
| 2017-10-01T09:58:23
| 59,496,650
| 0
| 0
| null | 2017-05-20T17:27:18
| 2016-05-23T15:56:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
for _ in range(input()):
n = input()
a = map(int, raw_input().split())
answer = 0
maxPossibleSpeed = 100000000000000000000
for i in range(n):
if maxPossibleSpeed > a[i]:
maxPossibleSpeed = a[i]
answer += 1
print answer
|
[
"kautsiitd@gmail.com"
] |
kautsiitd@gmail.com
|
b60fcc555ff549bc2fd84e7d9d52087d866b4e82
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/model_zoo/official/nlp/tinybert/src/assessment_method.py
|
748666e3cef397c4be599168e5dd82d4d296ad2f
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""assessment methods"""
import numpy as np
class Accuracy():
"""Accuracy"""
def __init__(self):
self.acc_num = 0
self.total_num = 0
def update(self, logits, labels):
labels = labels.asnumpy()
labels = np.reshape(labels, -1)
logits = logits.asnumpy()
logit_id = np.argmax(logits, axis=-1)
self.acc_num += np.sum(labels == logit_id)
self.total_num += len(labels)
class F1():
"""F1"""
def __init__(self):
self.TP = 0
self.FP = 0
self.FN = 0
def update(self, logits, labels):
"""Update F1 score"""
labels = labels.asnumpy()
labels = np.reshape(labels, -1)
logits = logits.asnumpy()
logit_id = np.argmax(logits, axis=-1)
logit_id = np.reshape(logit_id, -1)
pos_eva = np.isin(logit_id, [2, 3, 4, 5, 6, 7])
pos_label = np.isin(labels, [2, 3, 4, 5, 6, 7])
self.TP += np.sum(pos_eva & pos_label)
self.FP += np.sum(pos_eva & (~pos_label))
self.FN += np.sum((~pos_eva) & pos_label)
print("-----------------precision is ", self.TP / (self.TP + self.FP))
print("-----------------recall is ", self.TP / (self.TP + self.FN))
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
248b5ac5c6d190627d78e44150eb5ad826a328a2
|
0db67bff1f2dcdadecf635ae535add91cb54c4f3
|
/PythonBasis/week07/task08.py
|
12d7899dc36dc321d5e9e2c6401c3c56ac766676
|
[] |
no_license
|
pavelbrnv/Coursera
|
713fdb79dbf6fbde405fc991bd67db0cab30da00
|
cc568f79229147866ff1df8539cf8ea66dc9ccca
|
refs/heads/master
| 2023-03-07T23:21:09.685318
| 2021-02-22T15:08:27
| 2021-02-22T15:08:27
| 336,600,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
inFile = open('input.txt', 'r', encoding='utf8')
n = int(inFile.readline())
possible_answers = set(range(1, n + 1))
while True:
line = inFile.readline().strip()
if line == 'HELP':
break
values = set(map(int, line.split()))
intersection = possible_answers & values
difference = possible_answers - values
if len(intersection) > len(difference):
possible_answers = intersection
print('YES')
else:
possible_answers = difference
print('NO')
inFile.close()
print(*sorted(possible_answers))
|
[
"pbaranov@stc-spb.ru"
] |
pbaranov@stc-spb.ru
|
026207928ea78a4906fb1156e9dac2a4b63314bf
|
4388363ba45b95910c25bae3d9c02ad78f4a75d6
|
/python/anaconda/pkgs/bokeh-0.12.5-py27_1/lib/python2.7/site-packages/bokeh/_version.py
|
2d9d3c6ac66a462a5e597d7f4a5d0c307e53a471
|
[] |
no_license
|
locolucco209/MongoScraper
|
d494e02531f4f165b1e821633dc9661c579337b5
|
74476c9f00ee43338af696da7e9cd02b273f9005
|
refs/heads/master
| 2022-11-25T19:09:27.248747
| 2018-07-10T03:54:06
| 2018-07-10T03:54:06
| 137,553,786
| 3
| 1
| null | 2022-11-16T04:32:26
| 2018-06-16T04:49:22
| null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
# This file was generated by 'versioneer.py' (0.17) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
{
"date": "2017-04-05T16:01:29-0300",
"dirty": false,
"error": null,
"full-revisionid": "8f1cfc3b8dd56f815127032a7bb9419dea372ad8",
"version": "0.12.5"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
|
[
"lukemassetti@WestSide-Luke.local"
] |
lukemassetti@WestSide-Luke.local
|
8b2ff510d53caf77dc3fc6a53a9d025f256c25b2
|
801b637b846b5ada47c462ad8547d240ceba72b8
|
/Linked Lists/Remove Linked List Elements.py
|
5fa602316c130cf8f1835f61bcff3fecba943547
|
[] |
no_license
|
shlokashah/Coding-Practice
|
7834fed4b50b85ddcab420e830ecec89638390a5
|
a56e1a4185aba1f32c1169d486b705f28888ca07
|
refs/heads/master
| 2022-11-20T13:00:54.617380
| 2020-07-21T14:35:40
| 2020-07-21T14:35:40
| 252,912,592
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
temp = ListNode(0)  # the ListNode definition above requires an initial value
temp.next = head
cur = temp
while temp.next:
if temp.next.val == val:
temp.next = temp.next.next
else:
temp = temp.next
return cur.next
|
[
"shlokashah0603@gmail.com"
] |
shlokashah0603@gmail.com
|
1c83408411bb27495158a05efcd8fc60e46696ab
|
3395a234e7c80d011607e79c49cd48bf516f256b
|
/dependencies/jedi/third_party/typeshed/third_party/2and3/mypy_extensions.pyi
|
19d99cc9d70ca31f5df8f823fe75a31c3fd78c00
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
srusskih/SublimeJEDI
|
67329b72e184bc9584843968dcc534a002c797a1
|
95c185d778425c04536d53517b0e3fe6dedf8e59
|
refs/heads/master
| 2023-08-24T11:30:37.801834
| 2022-08-30T09:04:17
| 2022-08-30T09:04:17
| 6,241,108
| 669
| 125
|
MIT
| 2022-08-30T09:04:18
| 2012-10-16T08:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 2,051
|
pyi
|
import abc
import sys
from typing import (
Dict, Type, TypeVar, Optional, Union, Any, Generic, Mapping, ItemsView, KeysView, ValuesView,
Callable,
)
_T = TypeVar('_T')
_U = TypeVar('_U')
# Internal mypy fallback type for all typed dicts (does not exist at runtime)
class _TypedDict(Mapping[str, object], metaclass=abc.ABCMeta):
def copy(self: _T) -> _T: ...
# Using NoReturn so that only calls using mypy plugin hook that specialize the signature
# can go through.
def setdefault(self, k: NoReturn, default: object) -> object: ...
# Mypy plugin hook for 'pop' expects that 'default' has a type variable type.
def pop(self, k: NoReturn, default: _T = ...) -> object: ...
def update(self: _T, __m: _T) -> None: ...
if sys.version_info < (3, 0):
def has_key(self, k: str) -> bool: ...
def viewitems(self) -> ItemsView[str, object]: ...
def viewkeys(self) -> KeysView[str]: ...
def viewvalues(self) -> ValuesView[object]: ...
def __delitem__(self, k: NoReturn) -> None: ...
def TypedDict(typename: str, fields: Dict[str, Type[_T]], total: bool = ...) -> Type[Dict[str, Any]]: ...
def Arg(type: _T = ..., name: Optional[str] = ...) -> _T: ...
def DefaultArg(type: _T = ..., name: Optional[str] = ...) -> _T: ...
def NamedArg(type: _T = ..., name: Optional[str] = ...) -> _T: ...
def DefaultNamedArg(type: _T = ..., name: Optional[str] = ...) -> _T: ...
def VarArg(type: _T = ...) -> _T: ...
def KwArg(type: _T = ...) -> _T: ...
# Return type that indicates a function does not return.
# This type is equivalent to the None type, but the no-op Union is necessary to
# distinguish the None type from the None value.
NoReturn = Union[None] # Deprecated: Use typing.NoReturn instead.
# This is intended as a class decorator, but mypy rejects abstract classes
# when a Type[_T] is expected, so we can't give it the type we want
def trait(cls: Any) -> Any: ...
def mypyc_attr(*attrs: str, **kwattrs: object) -> Callable[[_T], _T]: ...
class FlexibleAlias(Generic[_T, _U]): ...
|
[
"srusskih@users.noreply.github.com"
] |
srusskih@users.noreply.github.com
|
afca66273f25c8e08d273045ec0a1a360be666e4
|
00be95b38365bbf024572c4071aa20edc85deddd
|
/pretraining/models/customnet.py
|
b9207ef3e74ef8fdcbebd153d37281027ca81beb
|
[
"MIT"
] |
permissive
|
tikzoxs/EyeKnowYouSSL_SimCLR
|
2fbbc00642a4068f74a1db6d82bba160ca738346
|
cdb92cf43eff6396fd416b3bba2b5bffcb8072c1
|
refs/heads/main
| 2023-01-12T19:59:24.706886
| 2020-11-15T04:02:59
| 2020-11-15T04:02:59
| 309,214,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
#create your custom net
class CustomNet(nn.Module):
def __init__(self, out_dim=64):
super(CustomNet, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2, 2)
# # projection MLP
self.l1 = nn.Linear(64, 256)
# self.l2 = nn.Linear(64, out_dim)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv3(x)
x = F.relu(x)
x = self.pool(x)
x = self.conv4(x)
x = F.relu(x)
x = self.pool(x)
h = torch.mean(x, dim=[2, 3])
x = self.l1(h)
return x
|
[
"shamane@ahlab.org"
] |
shamane@ahlab.org
|
db6ac5fe2e00e73bf729e3846e1634923d5a9b37
|
2efe8116a5a60f5f7c46cf1b0ac598be49087942
|
/EpsilonWebsite/EpsilonWebsite/wsgi.py
|
672d83380fb4b4ce6e03f6b0705b39fb5abb00da
|
[] |
no_license
|
SothanaV/EIweb
|
caaf8b9f844ebf28d0a45b7cceaf753277cbe1c7
|
cedca00e74151a6ecb78da7b76d8888c9c94424b
|
refs/heads/master
| 2021-09-04T04:19:30.124958
| 2018-01-15T18:32:47
| 2018-01-15T18:32:47
| 104,970,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for EpsilonWebsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EpsilonWebsite.settings")
application = get_wsgi_application()
|
[
"nai6320@gmail.com"
] |
nai6320@gmail.com
|
da7d289d9a5eb18c06ab6d897553543f1728130c
|
ad5c6daba04c8e04054085f96d36f5b167a09a37
|
/src/lepl/stream/maxdepth.py
|
b5573f0cf84668787a590009e71e57130452fe94
|
[] |
no_license
|
nyimbi/LEPL
|
f49fee47a3c47d0291d2356e8a1e9b3120e32c05
|
0603505f187acc3c7da2e1a6083833a201f8b061
|
refs/heads/master
| 2021-04-15T13:40:32.860153
| 2018-03-26T14:00:25
| 2018-03-26T14:00:25
| 126,837,047
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,162
|
py
|
# The contents of this file are subject to the Mozilla Public License
# (MPL) Version 1.1 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License
# at http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
# the License for the specific language governing rights and
# limitations under the License.
#
# The Original Code is LEPL (http://www.acooke.org/lepl)
# The Initial Developer of the Original Code is Andrew Cooke.
# Portions created by the Initial Developer are Copyright (C) 2009-2010
# Andrew Cooke (andrew@acooke.org). All Rights Reserved.
#
# Alternatively, the contents of this file may be used under the terms
# of the LGPL license (the GNU Lesser General Public License,
# http://www.gnu.org/licenses/lgpl.html), in which case the provisions
# of the LGPL License are applicable instead of those above.
#
# If you wish to allow use of your version of this file only under the
# terms of the LGPL License and not to allow others to use your version
# of this file under the MPL, indicate your decision by deleting the
# provisions above and replace them with the notice and other provisions
# required by the LGPL License. If you do not delete the provisions
# above, a recipient may use your version of this file under either the
# MPL or the LGPL License.
'''
Raise an exception if the stream is not consumed entirely.
'''
from lepl.stream.core import s_empty, s_fmt, s_deepest, s_next
from lepl.matchers.support import trampoline_matcher_factory
@trampoline_matcher_factory()
def FullFirstMatch(matcher, eos=True):
'''
Raise an exception if the first match fails (if eos=False) or does not
consume the entire input stream (eos=True). The exception includes
information about the location of the deepest match.
This only works for the first match because we cannot reset the stream
facade for subsequent matches (also, if you want multiple matches you
probably want more sophisticated error handling than this).
'''
def _matcher(support, stream1):
# set default maxdepth
s_next(stream1, count=0)
# first match
generator = matcher._match(stream1)
try:
(result2, stream2) = yield generator
if eos and not s_empty(stream2):
raise FullFirstMatchException(stream2)
else:
yield (result2, stream2)
except StopIteration:
raise FullFirstMatchException(stream1)
# subsequent matches:
while True:
result = yield generator
yield result
return _matcher
class FullFirstMatchException(Exception):
'''
The exception raised by `FullFirstMatch`. This includes information
about the deepest point read in the stream.
'''
def __init__(self, stream):
super(FullFirstMatchException, self).__init__(
s_fmt(s_deepest(stream),
'The match failed in {filename} at {rest} ({location}).'))
|
[
"nyimbi@gmail.com"
] |
nyimbi@gmail.com
|
76376141faf3d7231ac68a1fbea4c36860e31d1a
|
4e8a1750e6a9e7368c91bc9296fb1c1ff6b8f3ea
|
/unit08/exercise0806.py
|
af1899a476a88ac2dc06154710faf36c99c4c111
|
[] |
no_license
|
kevin510610/Book_AGuideToPython_Kaiching-Chang
|
961dbd24aa1094664b9a9754f2882d4d7f964289
|
7db7cf8186e02f4210a01fbd4c454f0030b57022
|
refs/heads/master
| 2023-04-16T04:59:51.598236
| 2021-04-13T03:08:32
| 2021-04-13T03:08:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
def factorial(n):
i = 1
p = 1
while i <= n:
p *= i
i += 1
return p
n = int(input("n: "))
print(factorial(n))
# File: exercise0806.py
# Author: Kaiching Chang
# Date: July, 2014
|
[
"kevin510610@gmail.com"
] |
kevin510610@gmail.com
|
0ae6c569d7ba64ecd69d11dfaa0d0a8135004962
|
ca23b411c8a046e98f64b81f6cba9e47783d2584
|
/factorize_a_city/libs/utils.py
|
939a7aaaec7f7ab87928e4d456fe97833be80859
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pdybczak/google-research
|
1fb370a6aa4820a42a5d417a1915687a00613f9c
|
0714e9a5a3934d922c0b9dd017943a8e511eb5bc
|
refs/heads/master
| 2023-03-05T23:16:11.246574
| 2021-01-04T11:30:28
| 2021-01-04T11:30:28
| 326,629,357
| 1
| 0
|
Apache-2.0
| 2021-02-01T12:39:09
| 2021-01-04T09:17:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,949
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils ops to support the factorize_city project."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
layers = tf.contrib.layers
def outlier_normalization(inp, clip_amount=3):
"""Operation for normalizing numpy images with unbounded values.
This is used to normalize log_reflectance and log_shading images which have
unbounded values. This function bounds the min-max of the array to be
plus or minus clip_amount standard deviations of the mean. The clipped range is
then shifted to [0, 1].
Args:
inp: [H, W, 3] A numpy array with unbounded values.
clip_amount: (int) how many standard deviations from the mean to clip by.
Returns:
A tensor of shape [H, W, 3] with values ranging from [0, 1].
"""
sigma = np.std(inp)
mu = np.mean(inp)
inp = np.clip(inp, mu - clip_amount * sigma, mu + clip_amount * sigma)
m = inp - np.min(inp,)
return m / np.max(m)
def pad_panorama_for_convolutions(tensor, ksz, mode):
pad_top = (ksz - 1) // 2
pad_bottom = ksz // 2
pad_left = (ksz - 1) // 2
pad_right = ksz // 2
reflect_pad = [[0, 0], [pad_top, pad_bottom], [0, 0], [0, 0]]
tensor = tf.pad(tensor, reflect_pad, mode)
tensor = tf.concat(
[tensor[:, :, -pad_left:,], tensor, tensor[:, :, :pad_right]], axis=-2)
return tensor
def reduce_median(tensor, axis=0, keep_dims=False):
return tfp.stats.percentile(tensor, 50, axis=axis, keep_dims=keep_dims)
def upsample(tensor, size=2):
unused_b, h, w, unused_d = tensor.shape.as_list()
return tf.compat.v1.image.resize_bilinear(
tensor, [size * h, size * w],
align_corners=False,
half_pixel_centers=True)
def instance_normalization(inp, scope=""):
with tf.compat.v1.variable_scope(scope):
return layers.instance_norm(
inp, center=True, scale=True, trainable=True, epsilon=1e-5)
def compute_circular_average(softmax_distribution):
"""Computes circular average of a batch of softmax_distribution.
Args:
softmax_distribution: [B, K] is a batch of distributions of angles over K
bins which spans [-pi, pi]. Each bin contains the probability of an
orientation in its corresponding angle direction.
Returns:
Circular average, in radians, of shape [B] for each distribution of K-bins.
"""
unused_batch_size, k_bins = softmax_distribution.shape.as_list()
radian_coordinates = tf.linspace(-np.pi, np.pi,
k_bins + 1)[:k_bins] + (np.pi) / k_bins
# Imagine a top-down view of the scene, where the x-axis points out the center
# of the panorama and the +y axis is clockwise.
x_vector_direction = tf.cos(radian_coordinates)
y_vector_direction = tf.sin(radian_coordinates)
expected_x_coordinate = tf.reduce_sum(
softmax_distribution * x_vector_direction[tf.newaxis], axis=-1)
expected_y_coordinate = tf.reduce_sum(
softmax_distribution * y_vector_direction[tf.newaxis], axis=-1)
# Project the circular average to the unit circle to prevent unstable
# exploding gradients when the average is close to the origin of the
# coordinate frame.
dist = tf.sqrt(expected_x_coordinate * expected_x_coordinate +
expected_y_coordinate * expected_y_coordinate + 1e-5)
normx = expected_x_coordinate / dist
normy = expected_y_coordinate / dist
return tf.atan2(normy, normx)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
9bf9a8fa4b7511ee7ddec1c52b7f7f7cc9c701c9
|
fb5d2c4c76b311871b23c1d7266f074d4a709ef6
|
/plotting/plot_ideal_dlogp.py
|
e57cb71de188c520b98ce28fb111ff526349a289
|
[
"AFL-3.0"
] |
permissive
|
philbull/RadioFisher
|
50be8d49d7bdde2712bd35682a359c43f22e3a28
|
fe25f969de9a700c5697168ba9e0d2645c55ed81
|
refs/heads/master
| 2023-01-20T01:27:39.982180
| 2020-11-24T07:44:51
| 2020-11-24T07:44:51
| 315,553,003
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
#!/usr/bin/python
"""
Plot fractional constraints on P(k) for Euclid and noise-free versions of
Facility with different amounts of foreground contamination. (Fig. 27)
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import os
from radiofisher import euclid
cosmo = rf.experiments.cosmo
names = ['EuclidRef_paper', 'exptCV_efg6_paper', 'exptCV_efg12_paper']
colours = ['#CC0000', '#1619A1', '#5B9C0A', '#990A9C'] # DETF/F/M/S
labels = ['DETF IV', 'Ideal, $\epsilon_\mathrm{FG}=10^{-6}$',
'Ideal, $\epsilon_\mathrm{FG}=10^{-12}$']
linestyle = [[], [8, 4], [2, 4, 6, 4], [3, 4]]
# Get f_bao(k) function
cosmo = rf.load_power_spectrum(cosmo, "cache_pk.dat", force_load=True)
fbao = cosmo['fbao']
# Fiducial value and plotting
P.subplot(111)
for k in range(len(names)):
root = "output/" + names[k]
# Load cosmo fns.
dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
zc, Hc, dAc, Dc, fc = dat
z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
kc = np.genfromtxt(root+"-fisher-kc.dat").T
# Load Fisher matrices as fn. of z
Nbins = zc.size
F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]
# EOS FISHER MATRIX
# Actually, (aperp, apar) are (D_A, H)
pnames = rf.load_param_names(root+"-fisher-full-0.dat")
zfns = []; excl = []
F, lbls = rf.combined_fisher_matrix( F_list,
expand=zfns, names=pnames,
exclude=excl )
# Just do the simplest thing for P(k) and get 1/sqrt(F)
cov = [np.sqrt(1. / np.diag(F)[lbls.index(lbl)]) for lbl in lbls if "pk" in lbl]
cov = np.array(cov)
pk = cosmo['pk_nobao'](kc) * (1. + fbao(kc))
# Replace nan/inf values
cov[np.where(np.isnan(cov))] = 1e10
cov[np.where(np.isinf(cov))] = 1e10
pw0 = rf.indexes_for_sampled_fns(11, zc.size, zfns)
pwa = rf.indexes_for_sampled_fns(12, zc.size, zfns)
print "-"*50
print names[k]
#print cov
print lbls[pw0], 1. / np.sqrt(F[pw0,pw0])
print lbls[pwa], 1. / np.sqrt(F[pwa,pwa])
"""
if k == 0:
# Plot shaded region
P.fill_between(kc, np.ones(kc.size)*1e-10, cov, facecolor='#e1e1e1', edgecolor='none')
else:
# Plot errorbars
P.plot(kc, cov, color=colours[k], label=labels[k], lw=2.2, ls=linestyle[k])
"""
line = P.plot(kc, cov, color=colours[k], label=labels[k], lw=2.4)
# Set custom linestyle
line[0].set_dashes(linestyle[k])
P.xscale('log')
P.yscale('log')
P.xlim((1e-3, 1.5e0))
P.ylim((8e-4, 1e1))
P.legend(loc='lower left', prop={'size':'large'}, frameon=False)
P.tick_params(axis='both', which='major', labelsize=20, size=8., width=1.5, pad=8.)
P.tick_params(axis='both', which='minor', labelsize=20, size=5., width=1.5)
P.xlabel(r"$k \,[\mathrm{Mpc}^{-1}]$", fontdict={'fontsize':'xx-large'})
P.ylabel(r"$\Delta P / P$", fontdict={'fontsize':'xx-large'})
P.tight_layout()
# Set size
#P.gcf().set_size_inches(8.,6.)
P.savefig('fig27-dlogp-ideal.pdf', transparent=True) # 100
P.show()
|
[
"philbull@gmail.com"
] |
philbull@gmail.com
|
f0b2ebaf72776e0d44d6bcd2b5874668d37c3582
|
8bd3229c4f07243c5756a029f507235e49221d21
|
/Store/src/products/migrations/0007_remove_category_catname2.py
|
5b5fc3c56ba8de8ea514074faf7386f03e2a398b
|
[] |
no_license
|
ammaralazie/Online-store
|
4d937cbd022c36f9f671593e9e6122edce262f54
|
51e6a8518ab52ce9e6bb589cce31876c944fc191
|
refs/heads/master
| 2023-02-12T14:51:55.088522
| 2021-01-09T04:04:14
| 2021-01-09T04:04:14
| 304,319,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 331
|
py
|
# Generated by Django 3.1 on 2020-08-24 13:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0006_auto_20200824_1308'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='CATName2',
),
]
|
[
"alaziiammar@gmail.com"
] |
alaziiammar@gmail.com
|
f3523dde04d19cfbb77789c443ba224da4bdcd25
|
5456502f97627278cbd6e16d002d50f1de3da7bb
|
/chromeos/DEPS
|
757faf1e910c622d4ec213d9ea619db55cb922fd
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/Chromium_7C66
|
72d108a413909eb3bd36c73a6c2f98de1573b6e5
|
c8649ab2a0f5a747369ed50351209a42f59672ee
|
refs/heads/master
| 2023-03-16T12:51:40.231959
| 2017-12-20T10:38:26
| 2017-12-20T10:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
# Please keep the dependencies here to a minimum. This is intended to be a
# low level Chrome OS system library that may be used by targets that need to
# be kept as small as possible.
include_rules = [
"+components/device_event_log",
"+components/policy/proto",
"+components/pref_registry",
"+components/prefs",
"+components/signin/core/account_id/account_id.h",
"+components/user_manager/known_user.h",
"+crypto",
"+net",
"+third_party/cros_system_api",
"+third_party/libxml",
"+third_party/protobuf",
# Some targets may not have any UI, so explicitly exclude src/ui.
"-ui",
]
|
[
"lixiaodonglove7@aliyun.com"
] |
lixiaodonglove7@aliyun.com
|
|
7ce3edaa3f5528687a51a5632a5bf3a96b5872cf
|
5e4d6df8fa464f4270855846bf0708ae24d4a572
|
/blabla/0625-0701_Antai_src/src/0626/1003time.py
|
54ff79984180c1e77ca402b85bd50491051ff27a
|
[] |
no_license
|
xy2333/tianchi-CBE
|
2455796a7241db65ef42dd4f00c6a13fb462f246
|
f169d21290e25ccf65bb7a0040e83cb9ef5f9dc2
|
refs/heads/master
| 2020-06-12T12:09:59.295929
| 2019-08-05T03:43:18
| 2019-08-05T03:43:18
| 194,294,624
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
#encoding=utf-8
# Derive four time-of-day flags (is_morning, is_afternoon, is_night, is_midnight) from the train table data
t1 = '2018-07-23 17:58:40' # Monday
t2 = '2018-07-24 17:58:40' # Tuesday
t3 = '2018-07-25 17:58:40' # Wednesday
t4 = '2018-07-26 17:58:40' # Thursday
t5 = '2018-07-27 10:58:40' # Friday
t6 = '2018-07-28 17:58:40' # Saturday
t7 = '2018-07-29 17:58:40' # Sunday
t8 = '2018-07-30 17:58:40' # Monday (following week)
import time
t_1 = time.strptime(t1,"%Y-%m-%d %H:%M:%S")
t_2 = time.strptime(t2,"%Y-%m-%d %H:%M:%S")
t_3 = time.strptime(t3,"%Y-%m-%d %H:%M:%S")
t_4 = time.strptime(t4,"%Y-%m-%d %H:%M:%S")
t_5 = time.strptime(t5,"%Y-%m-%d %H:%M:%S")
t_6 = time.strptime(t6,"%Y-%m-%d %H:%M:%S")
t_7 = time.strptime(t7,"%Y-%m-%d %H:%M:%S")
t_8 = time.strptime(t8,"%Y-%m-%d %H:%M:%S")
# is weekday
# def is_weekday(dt):
# if dt.tm_wday<5:
# print "Weekday"
# else:
# print "Weekend"
# print is_weekday(t_1)
# print is_weekday(t_2)
# print is_weekday(t_3)
# print is_weekday(t_4)
# print is_weekday(t_5)
# is morning
def is_morning(dt): # 5:00-11:00
if 4 < dt.tm_hour and dt.tm_hour < 12:
return 1
else:
return 0
def is_afternoon(dt): # 11:00-17:00
if 10 < dt.tm_hour and dt.tm_hour < 18:
return 1
else:
return 0
def is_night(dt): # 17:00-23:00
if 16 < dt.tm_hour and dt.tm_hour < 24:
return 1
else:
return 0
def is_midnight(dt): # 23:00-5:00
if dt.tm_hour >= 23 or dt.tm_hour < 5:  # tm_hour is 0-23, so midnight spans hours 23 and 0-4
return 1
else:
return 0
is_morning(t_5)
print ""
|
[
"2531188679@qq.com"
] |
2531188679@qq.com
|
64128404526fc7098153d4f5fada2b52e72e6af3
|
2b9397e9e26f7d97ce6983d36c9842ac773b70c6
|
/operation/migrations/0071_auto_20190724_1239.py
|
45003f13e5b832519139fd299b437ec39213d210
|
[] |
no_license
|
eakDev/aip-1
|
288ed7d7b8cf65c74b510f4f4e45292e3342796d
|
3db2520e3c246e25e2cfa62e395a3ba6ebe37252
|
refs/heads/main
| 2023-05-02T08:57:42.449727
| 2021-05-23T10:16:59
| 2021-05-23T10:16:59
| 386,578,482
| 1
| 0
| null | 2021-07-16T09:15:22
| 2021-07-16T09:15:22
| null |
UTF-8
|
Python
| false
| false
| 2,163
|
py
|
# Generated by Django 2.1.1 on 2019-07-24 04:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operation', '0070_projectitem_dependency'),
]
operations = [
migrations.RemoveField(
model_name='majorexpense',
name='budget',
),
migrations.RemoveField(
model_name='majorexpense',
name='spending',
),
migrations.AddField(
model_name='majorexpense',
name='admin',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Admin'),
),
migrations.AddField(
model_name='majorexpense',
name='drawings',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Drawings'),
),
migrations.AddField(
model_name='majorexpense',
name='equipment',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Equipment'),
),
migrations.AddField(
model_name='majorexpense',
name='labor',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Labor'),
),
migrations.AddField(
model_name='majorexpense',
name='materials',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Materials'),
),
migrations.AddField(
model_name='majorexpense',
name='overhead',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Overhead'),
),
migrations.AddField(
model_name='majorexpense',
name='total',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='Total'),
),
migrations.AddField(
model_name='majorexpense',
name='vat',
field=models.DecimalField(decimal_places=2, default=0, max_digits=20, verbose_name='VAT'),
),
]
|
[
"clyde.khayad@gmail.com"
] |
clyde.khayad@gmail.com
|
bc4bb2ab64e0a17a47e1b6f43fd2b3437f721193
|
6cbaade56c5db347d1be9a3422a69af52df39b97
|
/python_workspace/3_bigdata/02_Standardization_Analysis/03_DB/3db_update_rows.py
|
5adb87cf5b4001047ddbf256047752fe4797d8e7
|
[] |
no_license
|
baewonje/iot_bigdata_-
|
b54e3772f64b9695efee8632183590628b679e11
|
2ce1af67d2f05abeb2ecd442b7299f349bdb9753
|
refs/heads/master
| 2020-09-06T09:53:53.018320
| 2019-12-06T08:19:33
| 2019-12-06T08:19:33
| 220,390,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,662
|
py
|
# !/usr/bin/env python3
import csv
import sqlite3
import sys
# path to and name of a CSV input file
input_file = sys.argv[1] #data_for_updating.csv
# Create an in-memory SQLite3 database
# Create a table called sales with four attributes
con = sqlite3.connect(':memory:')
query = """CREATE TABLE IF NOT EXISTS sales
(customer VARCHAR(20),
product VARCHAR(40),
amount FLOAT,
Date DATE);"""
con.execute(query)
con.commit()
# Insert a few rows of data into the table
data = [('Richard Lucas', 'Notepad', 2.50, '2014-01-02'),
('Jenny Kim', 'Binder', 4.15,'2014-01-15'),
('Svetlana Crow', 'Printer', 155.75,'2014-02-03'),
('Stephen Randolph', 'Computer', 679.40, '2014-02-20')]
for record in data:
    print(record)
statement = "INSERT INTO sales VALUES(?, ?, ?, ?)"
con.executemany(statement, data)
con.commit()
# Read the CSV file and update the specific rows
file_reader = csv.reader(open(input_file, 'r'), delimiter=',')
header = next(file_reader, None)
for row in file_reader:
data = []
for column_index in range(len(header)):
data.append(row[column_index])
print(data)
con.execute("UPDATE sales SET amount=?, date=? WHERE customer=?;", data)
# UPDATE [table] SET [column] = new_value ... WHERE [column] = [filter value];
# An UPDATE should always be accompanied by a WHERE condition.
con.commit()
# Query the sales table
cursor = con.execute("SELECT * FROM sales")
rows = cursor.fetchall()
for row in rows:
output = []
for column_index in range(len(row)):
output.append(str(row[column_index]))
print(output)
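For the positional binding above to line up, the CSV must contain exactly three columns in the same order as the UPDATE placeholders (amount, date, customer). A hypothetical data_for_updating.csv, shown only as an illustration:
amount,date,customer
4.25,2014-05-04,Richard Lucas
6.75,2014-05-06,Jenny Kim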
|
[
"50129576+baewonje@users.noreply.github.com"
] |
50129576+baewonje@users.noreply.github.com
|
d2a94da24516c80d5f77b6421de322c3da5d2878
|
5e27c7f5426c169fd348b26e94b65c35f9cdc459
|
/hiveguilib/HBlender/NodeItemManager.py
|
4b7b29f0dc48b1dc592c47a1c27d9ff0efbb321a
|
[
"BSD-2-Clause"
] |
permissive
|
agoose77/hivesystem
|
e2c9c27408233b5794151ca74f541d2e6063d58a
|
e1f55c5ea530a989477edb896dcd89f3926a31b8
|
refs/heads/master
| 2020-07-21T23:07:37.178856
| 2014-08-23T02:13:19
| 2014-08-23T02:13:19
| 20,776,359
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,172
|
py
|
import bpy
from . import level
class NodeItem:
"""Operator entry within the Add node menu"""
def __init__(self, manager, key, fullkey):
self.manager = manager
self.key = key
self.fullkey = fullkey
def _active(self, context):
if context.space_data.edit_tree is None:
return False
if context.space_data.edit_tree.name not in self.manager._nodeitem_trees[self.fullkey]:
return False
if not level.active(context, tuple(self.fullkey.split("."))):
return
return True
def draw(self, layout, context):
default_context = bpy.app.translations.contexts.default
props = layout.operator("node.add_hive_node", text=self.key, text_ctxt=default_context)
props.type = self.fullkey
class NodeItemMenu:
"""Menu entry within the Add node menu"""
name = "NODE_MT_HIVE"
def __init__(self, title, fullname, make_panel=False):
if title is not None:
assert fullname is not None
self.title = title
self.fullname = fullname
self.children = []
def menudraw(struct, context):
if not level.active(context, self.fullname):
return
return self.draw(struct.layout, context)
cls_dict = dict(bl_space_type='NODE_EDITOR', bl_label="<HiveMenu>", draw=menudraw, poll=self.poll)
name = self.name
if self.fullname is not None:
name = self.name + "_" + "_".join(self.fullname)
self.name = name
self.menu_class = type(name, (bpy.types.Menu,), cls_dict)
if make_panel:
type_name = name.replace("NODE_MT_", "NODE_PT_")
cls_dict = dict(bl_space_type='NODE_EDITOR', bl_label=title, bl_region_type='TOOLS',
bl_options={'DEFAULT_CLOSED'}, poll=self._active, draw=menudraw)
self.panel_class = type(type_name, (bpy.types.Panel,), cls_dict)
else:
self.panel_class = None
def register(self):
if self.panel_class is not None:
bpy.utils.register_class(self.panel_class)
bpy.utils.register_class(self.menu_class)
def unregister(self):
if self.panel_class is not None:
bpy.utils.unregister_class(self.panel_class)
bpy.utils.unregister_class(self.menu_class)
def _active(self, context):
if not level.active(context, self.fullname):
return False
for child in self.children:
if child._active(context):
return True
return False
def draw(self, layout, context):
col = layout.column()
for child in self.children:
if not child._active(context):
continue
if isinstance(child, NodeItemMenu):
layout.menu(self.name + "_" + child.title, text=child.title)
else:
child.draw(col, context)
@classmethod
def poll(menucls, context):
return False
class NodeItemManager:
def __init__(self):
self._nodeitem_objects = NodeItemMenu(None, None)
self._nodeitems = {}
self._nodeitem_names = []
self._nodeitem_trees = {}
def append(self, node_tree_name, path):
full_path = ".".join(path)
if full_path not in self._nodeitem_names:
self._nodeitem_names.append(full_path)
self._nodeitem_trees[full_path] = []
item = NodeItem(self, path[-1], full_path)
self._nodeitems[path] = item
child = item
for key_index in range(len(path) - 1, 0, -1):
path_slice = path[:key_index]
if path_slice not in self._nodeitems:
path_component = path[key_index - 1]
make_panel = (key_index == 1)
menu = NodeItemMenu(path_component, path_slice, make_panel)
menu.register()
self._nodeitems[path_slice] = menu
else:
menu = self._nodeitems[path_slice]
if child not in menu.children:
menu.children.append(child)
child = menu
if child not in self._nodeitem_objects.children:
self._nodeitem_objects.children.append(child)
self._nodeitem_trees[full_path].append(node_tree_name)
def remove(self, node_tree_name, key):
# TODO implement nodeitem remove
raise NotImplementedError
def rename(self, old_node_tree_name, new_node_tree_name):
for full_key, node_trees in self._nodeitem_trees.items():
if not old_node_tree_name in node_trees:
continue
node_trees[node_trees.index(old_node_tree_name)] = new_node_tree_name
def draw_menu(self, struct, context):
menu = self._nodeitem_objects
if not menu._active(context):
return
menu.draw(struct.layout, context)
def register(self):
bpy.types.NODE_MT_add.append(self.draw_menu)
def unregister(self):
bpy.types.NODE_MT_add.remove(self.draw_menu)
|
[
"goosey15@gmail.com"
] |
goosey15@gmail.com
|
a6c8ba694d4221f97b510caa34d07f1f90b4820c
|
196137e16065125b854f00509849aaf4bd2d0394
|
/account/urls.py
|
470f067ac27933fb4a7774b6324375f32870c869
|
[] |
no_license
|
ksuvarna85/unicode_quiz
|
2f74f20fa5327a43e85a3e20538e9c3620a1e4dc
|
d45df6be213b44d1704163c7137d96c8695bfced
|
refs/heads/master
| 2022-12-27T06:04:37.912161
| 2020-10-16T05:17:22
| 2020-10-16T05:17:22
| 296,305,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
from django.contrib import admin
from django.urls import path,include
from account import views
app_name='account'
urlpatterns=[
path('register/',views.student_register,name='student_register'),
path('teacher_register/',views.teacher_register,name='teacher'),
path('login/',views.user_login,name='user_login'),
path('teacher_list/',views.ChapterListView.as_view(),name='list'),
path('teacher_result/<int:chp_pk>/',views.student_result,name='result_teacher'),
path('teacher_list/<int:pk>/',views.QuestionDetailView.as_view(),name='detail'),
path('addquestion/',views.questionform_view,name='add'),
path('update/<int:pk>/',views.QuestionUpdateView.as_view(),name='update'),
path('create/',views.ChapterCreateView.as_view(),name='create'),
path('student/<int:student_pk>/',views.student_chp_lst,name='list_fun'),
path('student/<int:student_pk>/<int:chp_pk>/',views.question_detail,name='detail_fun'),
path('delete/<int:pk>/',views.ChapterDeleteView.as_view(),name='delete'),
path('student_result/<int:student_pk>/<int:chp_pk>/',views.result,name='result'),
]
|
[
"you@example.com"
] |
you@example.com
|
e59211370261a20210a37aca73990884fc1ae746
|
8606267410dabfeacb4b7ff285a8d2250c139acc
|
/store/migrations/0001_initial.py
|
02752dd9c03ef9ffa7bb9baf7b45f72a0984e8d2
|
[] |
no_license
|
Taraltinu/chopping-Site
|
a5e6f6eeeecb4fef92f90770a3c2493eca0f0bde
|
1b722d53de1baaa5780701416f78dab62ef7d057
|
refs/heads/master
| 2022-12-20T07:06:16.602476
| 2020-10-02T18:07:31
| 2020-10-02T18:07:31
| 300,697,693
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
# Generated by Django 3.1.1 on 2020-09-11 10:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('price', models.IntegerField()),
('image', models.ImageField(upload_to='products/')),
('description', models.CharField(max_length=300)),
],
),
]
|
[
"tinu1316@gmail.com"
] |
tinu1316@gmail.com
|
5333cb7e40f6f61d1f108d164fb66a2042e93863
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayOpenMiniInnerVersionproportionModifyModel.py
|
b4cc7d37f14d975472b8a0fb77f70ed09f8bac46
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,311
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniInnerVersionproportionModifyModel(object):
def __init__(self):
self._app_origin = None
self._bundle_id = None
self._dev_id = None
self._mini_app_id = None
self._operate_id = None
self._value = None
@property
def app_origin(self):
return self._app_origin
@app_origin.setter
def app_origin(self, value):
self._app_origin = value
@property
def bundle_id(self):
return self._bundle_id
@bundle_id.setter
def bundle_id(self, value):
self._bundle_id = value
@property
def dev_id(self):
return self._dev_id
@dev_id.setter
def dev_id(self, value):
self._dev_id = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def operate_id(self):
return self._operate_id
@operate_id.setter
def operate_id(self, value):
self._operate_id = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_alipay_dict(self):
params = dict()
if self.app_origin:
if hasattr(self.app_origin, 'to_alipay_dict'):
params['app_origin'] = self.app_origin.to_alipay_dict()
else:
params['app_origin'] = self.app_origin
if self.bundle_id:
if hasattr(self.bundle_id, 'to_alipay_dict'):
params['bundle_id'] = self.bundle_id.to_alipay_dict()
else:
params['bundle_id'] = self.bundle_id
if self.dev_id:
if hasattr(self.dev_id, 'to_alipay_dict'):
params['dev_id'] = self.dev_id.to_alipay_dict()
else:
params['dev_id'] = self.dev_id
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.operate_id:
if hasattr(self.operate_id, 'to_alipay_dict'):
params['operate_id'] = self.operate_id.to_alipay_dict()
else:
params['operate_id'] = self.operate_id
if self.value:
if hasattr(self.value, 'to_alipay_dict'):
params['value'] = self.value.to_alipay_dict()
else:
params['value'] = self.value
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniInnerVersionproportionModifyModel()
if 'app_origin' in d:
o.app_origin = d['app_origin']
if 'bundle_id' in d:
o.bundle_id = d['bundle_id']
if 'dev_id' in d:
o.dev_id = d['dev_id']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'operate_id' in d:
o.operate_id = d['operate_id']
if 'value' in d:
o.value = d['value']
return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
2f8dcf3655a9ae3cdee7dbe1d09991ec55257159
|
e3bdb7844f634efd89109079d22cade713c4899d
|
/openapi_client/models/void_transaction_all_of.py
|
ce89ea1172ba74575cfc5d49430f63a171e6cf8d
|
[] |
no_license
|
pc-coholic/Python
|
5170c27da09b066c353e09539e404961f7ad50b7
|
b7251c31339b579f71fb7ee9db05be51e9e43361
|
refs/heads/master
| 2023-04-19T02:42:02.914726
| 2021-04-26T16:07:37
| 2021-04-26T16:07:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# coding: utf-8
"""
Payment Gateway API Specification.
The documentation here is designed to provide all of the technical guidance required to consume and integrate with our APIs for payment processing. To learn more about our APIs please visit https://docs.firstdata.com/org/gateway. # noqa: E501
The version of the OpenAPI document: 21.2.0.20210406.001
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VoidTransactionAllOf(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'transaction_amount': 'Amount'
}
attribute_map = {
'transaction_amount': 'transactionAmount'
}
def __init__(self, transaction_amount=None): # noqa: E501
"""VoidTransactionAllOf - a model defined in OpenAPI""" # noqa: E501
self._transaction_amount = None
self.discriminator = None
if transaction_amount is not None:
self.transaction_amount = transaction_amount
@property
def transaction_amount(self):
"""Gets the transaction_amount of this VoidTransactionAllOf. # noqa: E501
:return: The transaction_amount of this VoidTransactionAllOf. # noqa: E501
:rtype: Amount
"""
return self._transaction_amount
@transaction_amount.setter
def transaction_amount(self, transaction_amount):
"""Sets the transaction_amount of this VoidTransactionAllOf.
:param transaction_amount: The transaction_amount of this VoidTransactionAllOf. # noqa: E501
:type: Amount
"""
self._transaction_amount = transaction_amount
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VoidTransactionAllOf):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"emargules@bluepay.com"
] |
emargules@bluepay.com
|
7fa572d27d6541c78c9f0fa12047679f64d428d0
|
6efb8ca0d1a48edf1335e8fd046ef79072282b9c
|
/实验/6章/例题/6-10a-to-A.py
|
f9a13bef6eb3c68edbdf57fa62cbc38546a813f2
|
[] |
no_license
|
RedheatWei/python-study
|
f0c35afd7325982568f554f4eded6a75f9eb8b49
|
3a7dc64028e5246198d7a64c1dc9ee318992020e
|
refs/heads/master
| 2021-01-01T17:13:35.156410
| 2019-07-29T09:18:52
| 2019-07-29T09:18:52
| 98,027,727
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 168
|
py
|
#!/usr/bin/env python
#_*_ coding:utf-8 _*_
'''
Created on 2015-02-26
@author: Redheat
'''
str_first = raw_input('Enter a string:')
print str_first.swapcase()
|
[
"qjyyn@qq.com"
] |
qjyyn@qq.com
|
c91b6d4d976b7babc5a3e71d20631f1d1e590f59
|
7c69c27a1c6ff2a1552900f4c1001281f4447233
|
/codechef/cnote.py
|
3694d9b77b4c2bf333e653fa8cf7792e0225184b
|
[] |
no_license
|
Hamiltonxx/pyalgorithms
|
894a0228928819601a816c472689ce96a11e1d25
|
92284f6105c5deb7f843ff299ee3ceb6382cf879
|
refs/heads/master
| 2023-09-04T13:01:46.465661
| 2023-09-02T05:50:23
| 2023-09-02T05:50:23
| 231,999,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
T = int(input())
for i in range(T):
X,Y,K,N = map(int, input().split())
left = X-Y
flag=0
for j in range(N):
P,C = map(int, input().split())
if left<=P and C<=K and flag==0:
flag=1
print("LuckyChef") if flag else print("UnluckyChef")
|
[
"hamiltonhgz@gmail.com"
] |
hamiltonhgz@gmail.com
|
5e419e8f6008694b7ab2272f54bb1a47a63ce4d4
|
b341a8d120737297aa8fd394a23633dac9b5ccda
|
/accounts/migrations/0002_remove_customuser_department.py
|
491606403866741ecec2bdf0b085d4202f9d193b
|
[] |
no_license
|
Minari766/disney_side_stories
|
16d97cb02bf00aa5439d59f753abb9a4706a30aa
|
aa2d88b1b0fdd87a27f41318bd3ec7352229b6ff
|
refs/heads/main
| 2023-08-15T07:03:16.922579
| 2021-10-03T07:47:22
| 2021-10-03T07:47:22
| 306,496,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
# Generated by Django 2.2.16 on 2021-01-13 14:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='department',
),
]
|
[
"mina3.ryu0728@gmail.com"
] |
mina3.ryu0728@gmail.com
|
616cc832511e0d987c803eef500c6c3d52031364
|
d8b201ba6bf57db0101d88836429bbcb3a10b857
|
/Debugging/WordsScore.py
|
498ac004178cda6a9c940b240758bb48d0b5f954
|
[
"MIT"
] |
permissive
|
MaxCodeXTC/PythonHackerRankSolutions
|
32ad41df3fbd33f8651cdc5099c8ec3d37d9bc17
|
987618b61b71fe5e9a40275fb348476657bbea57
|
refs/heads/master
| 2022-06-28T06:00:19.126751
| 2020-05-07T09:23:37
| 2020-05-07T09:23:37
| 262,471,271
| 1
| 0
| null | 2020-05-09T02:24:11
| 2020-05-09T02:24:10
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
'''
Title : Words Score
Subdomain : Debugging
Domain : Python
Author : codeperfectplus
Created : 17 January 2020
'''
|
[
"54245038+perfect104@users.noreply.github.com"
] |
54245038+perfect104@users.noreply.github.com
|
c275f43eb61c8eb74f5a97d674bd1f452c0c7b93
|
386a5b505d77c9798aaab78495d0f00c349cf660
|
/Prognos Project/Working/Latiket Jaronde Git/DJango examples/DynamicUrls/urlDemo/views.py
|
bd2b5dba4a0956ad113487a7d28a7ebb91ba1c86
|
[] |
no_license
|
namratarane20/MachineLearning
|
2da2c87217618d124fd53f607c20641ba44fb0b7
|
b561cc74733b655507242cbbf13ea09a2416b9e2
|
refs/heads/master
| 2023-01-20T18:54:15.662179
| 2020-03-09T14:12:44
| 2020-03-09T14:12:44
| 237,597,461
| 0
| 0
| null | 2023-01-05T12:37:12
| 2020-02-01T10:22:20
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home(request, name=""):
l = [{'a': 1, 'b': 2, 'c': 3}, {'a': 11, 'b': 22, 'c': 33}, {'a': 111, 'b': 222, 'c': 333}]
if name == "":
data = ['latiket', 'akash', 10, 20]
name = "Dom"
context = {"msg": "welcome ", "data": data, "name": name, 'l': l}
return render(request, "urlDemo/home.html", context)
else:
data = [10, 20]
context = {"msg": "welcome ", "data": data, "name": name, 'first': True, 'l': l}
return render(request, "urlDemo/second.html", context)
def out(request, name, d):
print("inside out")
print("d1 = ", d)
data = ['latiket', 'akash', 10, 20]
context = {"msg": "welcome ", "data": data, "number": d, "name": name, 'first': False}
return render(request, "urlDemo/second.html", context)
|
[
"namrata.ashok@impelsys.com"
] |
namrata.ashok@impelsys.com
|
1758849bf2661b3f1dcfd1af37f68a1e02729240
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/parcoords/dimension/_templateitemname.py
|
a6797fb9536cd704210159e36fd5b521aea5421a
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='templateitemname',
parent_name='parcoords.dimension',
**kwargs
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='info',
**kwargs
)
|
[
"noreply@github.com"
] |
miladrux.noreply@github.com
|
0f149c6acecb80330e3c79511180324dbce155fb
|
2aba62d66c2c622bdc148cef451da76cae5fd76c
|
/exercise/learn_python_dm2039/ch30/ch30_17.py
|
b986aa91ea8f242a52bc9f62580e37d7407553c6
|
[] |
no_license
|
NTUT-109AB8011/crawler
|
6a76de2ab1848ebc8365e071e76c08ca7348be62
|
a703ec741b48d3af615a757fed7607b1f8eb66a6
|
refs/heads/master
| 2023-03-26T22:39:59.527175
| 2021-03-30T03:29:22
| 2021-03-30T03:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
# ch30_17.py
import threading
import time
def worker():
    print(threading.current_thread().name, 'Starting')
    time.sleep(3)
    print(threading.current_thread().name, 'Exiting')
w = threading.Thread(name='worker', target=worker)
w.start()
print('start join')
w.join(1.5)    # wait up to 1.5 seconds for the worker thread to finish before continuing
print("Is the worker thread still alive? ", w.is_alive())
time.sleep(2)  # the main thread sleeps for 2 seconds
print("Is the worker thread still alive? ", w.is_alive())
print('end join')
|
[
"terranandes@gmail.com"
] |
terranandes@gmail.com
|
93ca2f8487f174dab4e789d314ee88d24e2f3ce9
|
247508a09bbcd08f75de7c85118caf857941f9dd
|
/python/lvmscp/actor/commands/focus.py
|
df0a3a6a39a0203eecff28d2afa3789178f3787b
|
[
"BSD-3-Clause"
] |
permissive
|
sdss/lvmscp
|
051b6aad20d2a184ac046be086331fa06f2d3fa2
|
f74d83997cbba01a0c5b55615fbe6dbf0572b8c9
|
refs/heads/main
| 2023-09-02T19:47:20.394310
| 2023-08-31T19:32:15
| 2023-08-31T19:32:15
| 348,923,320
| 2
| 0
|
BSD-3-Clause
| 2022-04-11T02:59:57
| 2021-03-18T03:03:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,827
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2022-05-14
# @Filename: focus.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
from typing import TYPE_CHECKING
import click
from archon.actor.commands import parser
if TYPE_CHECKING:
from archon.controller import ArchonController
from ..actor import CommandType
__all__ = ["focus"]
async def move_hds(
command: CommandType,
spectro: str,
side: str = "all",
action: str = "open",
verbose: bool = False,
):
"""Helper to open/close HDs."""
if verbose:
if action == "open":
command.info(f"Opening {side} Hartmann door(s).")
else:
command.info(f"Closing {side} Hartmann door(s).")
hd_cmd = await (
await command.send_command("lvmieb", f"hartmann {action} -s {side} {spectro}")
)
if hd_cmd.status.did_fail:
command.fail(
"Failed moving Hartmann doors. See lvmieb log for more information."
)
return False
return True
# TODO: needs rewriting for different specs.
@parser.command()
@click.argument("SPECTRO", type=click.Choice(["sp1", "sp2", "sp3"]))
@click.argument("EXPTIME", type=float)
@click.option("-n", "--count", type=int, default=1, help="Number of focus cycles.")
@click.option("--dark", flag_value=True, help="Take a dark along each exposure.")
async def focus(
command: CommandType,
controllers: dict[str, ArchonController],
spectro: str,
exptime: float,
count: int = 1,
dark: bool = False,
):
"""Take a focus sequence with both Hartmann doors."""
# TODO: add a check for arc lamps or, better, command them to be on.
for n in range(count):
if count != 1:
command.info(f"Focus iteration {n+1} out of {count}.")
for side in ["left", "right"]:
# Open both HDs.
if not (await move_hds(command, spectro, "all", "open", verbose=False)):
return
# Close HD.
if not (await move_hds(command, spectro, side, "close", verbose=True)):
return
# Arc exposure.
command.info("Taking arc exposure.")
expose_cmd = await command.send_command(
"lvmscp", f"expose --arc -c {spectro} {exptime}"
)
await expose_cmd
if expose_cmd.status.did_fail:
return command.fail("Failed taking arc exposure.")
filenames = []
for reply in expose_cmd.replies:
if "filenames" in reply.message:
filenames += reply.message["filenames"]
dark_filenames = []
if dark:
# Dark exposure, if commanded.
command.info("Taking dark exposure.")
dark_cmd = await command.send_command(
"lvmscp", f"expose --dark -c {spectro} {exptime}"
)
await dark_cmd
if dark_cmd.status.did_fail:
return command.fail("Failed taking arc exposure.")
for reply in dark_cmd.replies:
if "filenames" in reply.message:
dark_filenames += reply.message["filenames"]
command.info(
focus={
"spectrograph": spectro,
"iteration": n + 1,
"side": side,
"exposures": filenames,
"darks": dark_filenames,
}
)
# Reopen HDs.
command.info("Reopening Hartmann doors.")
if not (await move_hds(command, spectro, "all", "open", verbose=False)):
return
command.finish()
|
[
"gallegoj@uw.edu"
] |
gallegoj@uw.edu
|
c58b535953582454e0c1d3cc1dbbab28db5bf736
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/AOJ/id1130.py
|
4af6ca8e627dfbd5e251b8aee1fa908bddc0e8e3
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
def do():
while True:
from collections import deque
w, h = map(int, input().split())
if w == h == 0: break
maze = []
curh, curw = -1, -1
maze.append("#" * (w+2))
for hh in range(h):
l = list("#" + input() + "#")
if l.count("@"): curh, curw = hh + 1, l.index("@")
maze.append(l)
maze.append("#" * (w+2))
maze[curh][curw] = "."
q = deque([(curh, curw)])
dh = [-1, 0, 0, 1]
dw = [0, -1, 1, 0]
ans = 0
while len(q) > 0:
curh, curw = q.popleft()
if maze[curh][curw] == "#": continue
maze[curh][curw] = "#"
ans += 1
for di in range(len(dh)):
nexth, nextw = curh + dh[di], curw + dw[di]
if maze[nexth][nextw] == "#": continue
q.append( (nexth, nextw) )
print(ans)
def do2():
dh = [-1, 0, 0, 1]
dw = [0, -1, 1, 0]
while True:
w, h = map(int, input().split())
if w == h == 0: break
maze = []
curh, curw = -1, -1
maze.append("#" * (w + 2))
for hh in range(h):
l = list("#" + input() + "#")
if l.count("@"): curh, curw = hh + 1, l.index("@")
maze.append(l)
maze.append("#" * (w + 2))
def search(h, w):
if maze[h][w] == "#": return 0
ans = 1
maze[h][w] = "#"
for di in range(len(dh)):
ans += search(h + dh[di], w + dw[di])
return ans
print(search(curh, curw))
#do()
do2()
|
[
"kanai@wide.ad.jp"
] |
kanai@wide.ad.jp
|
7f31e3454fffeba9d60042466e01d28db7bf7dcd
|
7cbcef1abbc76c43e2dd094bfe51f81fba8b0e9a
|
/03_Computer_Vision_OpenCV/01_Document_Scanner/01_document_scanner.py
|
c09f13abf132d8705185ace2110593b342c9f948
|
[] |
no_license
|
SimonSlominski/Data_Science
|
10fd5ca3bba8718b19804200c8f14e241e1e78b2
|
5cab52be83effc9e0b9a86888cedcd836dd00980
|
refs/heads/master
| 2021-05-17T11:19:11.509588
| 2020-06-15T13:44:47
| 2020-06-15T13:44:47
| 250,752,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,914
|
py
|
"""
Detection problems may occur if the background of the image is bright
"""
from numpy.linalg import norm
from skimage.filters import threshold_local
import numpy as np
import imutils
import cv2
image = cv2.imread('images/paragon_1.jpg')
# Image size standardization
# Keep a copy of the original image for later transformations
original_image = image.copy()
# Keep the original image's aspect ratio
ratio = image.shape[0] / 500.0
# Resize to a height of 500 px, e.g. from (600, 450, 3) to (500, 375, 3)
image = imutils.resize(image, height=500)
# Image conversion to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Add blur to suppress noise before edge detection
gray_image = cv2.GaussianBlur(gray_image, ksize=(5, 5), sigmaX=0)
# Edge detection
edges = cv2.Canny(gray_image, threshold1=75, threshold2=200)
# Find contours
contours = cv2.findContours(image=edges.copy(),
mode=cv2.RETR_LIST,
method=cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
# Finding the document outline
screen_contour = None
for contour in contours:
# calculate the perimeter of each figure found
perimeter = cv2.arcLength(curve=contour, closed=True)
# approximation of the rectangle curve
approx = cv2.approxPolyDP(curve=contour, epsilon=0.02 * perimeter, closed=True)
if len(approx) == 4:
screen_contour = approx
break
# Display found vertices
# vertices = cv2.drawContours(image, contours=screen_contour, contourIdx=-1, color=(0, 255, 0), thickness=10)
# Extraction of vertices
points = screen_contour.reshape(4, 2)
points = points * ratio
# Create empty numpy array
rectangle = np.zeros((4, 2), dtype='float32')
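# Corner ordering: the top-left corner has the smallest x+y sum and the bottom-right the largest;
# np.diff(points, axis=1) yields y-x, whose argmin picks the top-right corner and argmax the bottom-left.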
total = points.sum(axis=1)
rectangle[0] = points[np.argmin(total)]
rectangle[2] = points[np.argmax(total)]
difference = np.diff(points, axis=1)
rectangle[1] = points[np.argmin(difference)]
rectangle[3] = points[np.argmax(difference)]
a, b, c, d = rectangle
width1 = norm(c - d)
width2 = norm(b - a)
max_width = max(int(width1), int(width2))
height1 = norm(b - c)
height2 = norm(a - d)
max_height = max(int(height1), int(height2))
vertices = np.array([
[0, 0],
[max_width -1, 0],
[max_width -1, max_height - 1],
[0, max_height - 1]
], dtype='float32')
# Transformation matrix 3x3
M = cv2.getPerspectiveTransform(rectangle, vertices)
# Transfer of document to image
out = cv2.warpPerspective(src=original_image, M=M, dsize=(max_width, max_height))
# To grayscale
out = cv2.cvtColor(out, cv2.COLOR_RGB2GRAY)
# Calculation of the threshold mask based on the proximity of pixels
T = threshold_local(image=out, block_size=11, offset=10, method='gaussian')
out = (out > T).astype('uint8') * 255
cv2.imshow('img', out)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"simon.slominski@gmail.com"
] |
simon.slominski@gmail.com
|
f74cc4930dcd25bf2e86b5bc9d77e8b29a9297ba
|
d5be74d2de6fa0ded61d6c3ee7c91a403c0f90db
|
/quantarhei/qm/liouvillespace/rates/foersterrates.py
|
36cb1e2bc487d09ec05efb6449d60fc918149f64
|
[
"MIT"
] |
permissive
|
tmancal74/quantarhei
|
43cf9d4be857b8e6db1274ebb8a384f1545cd9ad
|
fa3042d809005d47106e53609e6a63aa780c477c
|
refs/heads/master
| 2023-05-11T06:57:36.368595
| 2023-05-02T13:10:18
| 2023-05-02T13:10:18
| 63,804,925
| 20
| 22
|
MIT
| 2022-12-21T14:10:00
| 2016-07-20T18:30:25
|
Python
|
UTF-8
|
Python
| false
| false
| 5,021
|
py
|
# -*- coding: utf-8 -*-
import numpy
import scipy.interpolate as interp
from ...hilbertspace.hamiltonian import Hamiltonian
from ...liouvillespace.systembathinteraction import SystemBathInteraction
from ...corfunctions.correlationfunctions import c2g
class FoersterRateMatrix:
"""Förster relaxation rate matrix
Förster population relaxation rate matrix is calculated from the
Hamiltonian and the system-bath interaction.
Parameters
----------
ham : Hamiltonian
Hamiltonian object
sbi : SystemBathInteraction
SystemBathInteraction object
initialize : bool (default True)
If true, the rates will be calculated when the object is created
cutoff_time : float
If cutoff time is specified, the tensor is integrated only up to the
cutoff time
"""
def __init__(self, ham, sbi, initialize=True, cutoff_time=None):
if not isinstance(ham, Hamiltonian):
raise Exception("First argument must be a Hamiltonian")
if not isinstance(sbi, SystemBathInteraction):
raise Exception("Second argument must be a SystemBathInteraction")
self._is_initialized = False
self._has_cutoff_time = False
if cutoff_time is not None:
self.cutoff_time = cutoff_time
self._has_cutoff_time = True
self.ham = ham
self.sbi = sbi
if initialize:
self.initialize()
self._is_initialized = True
def initialize(self):
HH = self.ham.data
Na = self.ham.dim
sbi = self.sbi
tt = sbi.TimeAxis.data
# line shape functions
gt = numpy.zeros((Na, sbi.TimeAxis.length),
dtype=numpy.complex64)
# SBI is defined with "sites"
for ii in range(1, Na):
gt[ii,:] = c2g(sbi.TimeAxis, sbi.CC.get_coft(ii-1,ii-1))
# reorganization energies
ll = numpy.zeros(Na)
for ii in range(1, Na):
ll[ii] = sbi.CC.get_reorganization_energy(ii-1,ii-1)
self.data = _reference_implementation(Na, HH, tt, gt, ll)
def _reference_implementation(Na, HH, tt, gt, ll):
"""Reference implementation of Foerster rates
Calculate the rates between specified sites using standard Foerster
theory.
Reference:
L. Valkunas, D. Abramavicius, and T. Mančal, Molecular Excitation
Dynamics and Relaxation, Wiley-VCH, Berlin (2013), page:
Parameters
----------
Na : integer
Number of sites in the problem (rank of the rate matrix)
HH : float array
Hamiltonian matrix
tt : float array
Time points in which the line shape functions are given
gt : complex array
Line shape functions values at give time points.
First index corresponds to the site, the second to the time point
ll : array
Reorganization energies on sites
Returns
-------
KK : float array
Rate matrix with zeros on the diagonal
"""
#
# Rates between states a and b
#
KK = numpy.zeros((Na,Na), dtype=numpy.float64)
for a in range(Na):
for b in range(Na):
if a != b:
ed = HH[b,b] # donor
ea = HH[a,a] # acceptor
KK[a,b] = (HH[a,b]**2)*_fintegral(tt, gt[a,:], gt[b,:],
ed, ea, ll[b])
#
# depopulation rates
#
Kaa = 0.0
for a in range(Na):
Kaa = numpy.sum(KK[:,a])
KK[a,a] = -Kaa
return KK
def _fintegral(tt, gtd, gta, ed, ea, ld):
"""Foerster integral
Parameters
----------
tt : numpy array
Time
gtd : numpy array
lineshape function of the donor transition
gta : numpy array
lineshape function of the acceptor transition
ed : float
Energy of the donor transition
ea : float
Energy of the acceptor transition
ld : float
Reorganization energy of the donor
Returns
-------
ret : float
The value of the Foerster integral
"""
#fl = numpy.exp(-gtd +1j*(ed-2.0*ld)*tm.data)
#ab = numpy.exp(-gta -1j*ea*tm.data)
#prod = ab*fl
prod = numpy.exp(-gtd-gta +1j*((ed-ea)-2.0*ld)*tt)
preal = numpy.real(prod)
pimag = numpy.imag(prod)
splr = interp.UnivariateSpline(tt,
preal, s=0).antiderivative()(tt)
spli = interp.UnivariateSpline(tt,
pimag, s=0).antiderivative()(tt)
hoft = splr + 1j*spli
ret = 2.0*numpy.real(hoft[len(tt)-1])
return ret
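For reference, a sketch of the quantity _fintegral evaluates, using the notation of the docstrings above (donor energy $\varepsilon_d$, acceptor energy $\varepsilon_a$, donor reorganization energy $\lambda_d$, line shape functions $g_d(t)$ and $g_a(t)$, with $T$ the last point of the time axis):

$$
I = 2\,\mathrm{Re}\int_0^{T}\exp\!\Big[-g_d(t)-g_a(t)+\mathrm{i}\big((\varepsilon_d-\varepsilon_a)-2\lambda_d\big)\,t\Big]\,\mathrm{d}t ,
\qquad K_{ab} = |H_{ab}|^{2}\, I .
$$

This corresponds to the spline antiderivative evaluated at the last time point in the code above, with the squared coupling applied in _reference_implementation.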
|
[
"tmancal74@gmail.com"
] |
tmancal74@gmail.com
|
8ed1850e0eac4651d703f9597c4758f2e6abce1e
|
4309919e2361b3e6364fac19fed1e5c40bb6a038
|
/yowsup_celery/exceptions.py
|
8d667832be294372d4000c9d7a62cb8956c25b17
|
[
"ISC"
] |
permissive
|
astamiviswakarma/yowsup-celery
|
85b0422a393afa2b3aebb94198bc0a4812b237ed
|
75f8edf8832ab1c3370e58a049a2e74e1691276e
|
refs/heads/master
| 2021-07-18T14:41:48.695265
| 2019-01-23T19:25:07
| 2019-01-23T19:25:07
| 135,594,100
| 0
| 0
|
ISC
| 2018-09-13T08:11:39
| 2018-05-31T14:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
# -*- coding: utf-8 -*-
class YowsupCeleryError(Exception):
pass
class UnexpectedError(YowsupCeleryError):
""" Raised for unknown or unexpected errors. """
pass
class ConfigurationError(YowsupCeleryError):
"""
Raised when YowsupStack detects an error in configuration
"""
pass
class ConnectionError(YowsupCeleryError):
"""
Raised when CeleryLayer tries to perform an action which requires to be
connected to WhatsApp
"""
pass
class AuthenticationError(YowsupCeleryError):
"""
Raised when YowsupStack cannot authenticate with WhatsApp. This means the
password for the number is incorrect. Check whether registration was correct.
"""
pass
|
[
"jlmadurga@gmail.com"
] |
jlmadurga@gmail.com
|
1717287b96485ca0b2e569fabca1eae0984ef0fa
|
647efc6a8ab5511e30cccb1f3f3af697acc83bcc
|
/queue_sample.py
|
c94eb18928417f75008174b195a16175af637880
|
[] |
no_license
|
syuuhei-yama/python_01
|
5e244160b12e4023637220e0cfa4a1318f70d265
|
e94bc7d9c27bc3ae9cc66a7f87e2de13cc5efaae
|
refs/heads/master
| 2022-12-02T20:14:08.550739
| 2020-08-24T08:46:18
| 2020-08-24T08:46:18
| 289,867,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#Queue
from queue import Queue, Full
q = Queue(maxsize=3)
print(q.qsize())
print(q.queue)
print(q.empty())
print(q.full())
q.put('A')
q.put('B')
q.put('C')
try:
    q.put_nowait('D')   # the queue already holds 3 items (maxsize=3), so this raises Full
except Full:
    print('queue is full, D was not added')
print(q.qsize())
print(q.queue)
print(q.empty())
print(q.full())
#var = q.get()
#print(var)
#print(q.queue)
#var = q.get()
#var = q.get()
#var = q.get()
#print('Processing finished')
|
[
"syuuhei0615@icloud.com"
] |
syuuhei0615@icloud.com
|
ccb008ca1217d6ee113dab2c527a5a986495c0e0
|
ab4b08284590c3dd2d09f7de2adc35943a3b59f9
|
/yqc_huoerguosi_spider/yqc_huoerguosi_spider/settings.py
|
6527b1acc25a876e774a16bc5715d8646f22ac3b
|
[] |
no_license
|
james-hadoop/JamesScrapy
|
bfe71dc837a2cc61b5eee3b953f8a5b35a40820d
|
cbf3e230e919da1cfb76ba0d741440206c39e4cf
|
refs/heads/master
| 2020-08-28T23:46:49.816208
| 2020-06-28T23:54:09
| 2020-06-28T23:54:09
| 217,856,936
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for yqc_huoerguosi_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'yqc_huoerguosi_spider'
SPIDER_MODULES = ['yqc_huoerguosi_spider.spiders']
NEWSPIDER_MODULE = 'yqc_huoerguosi_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'yqc_huoerguosi_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'yqc_huoerguosi_spider.middlewares.YqcHuoerguosiSpiderSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'yqc_huoerguosi_spider.middlewares.YqcHuoerguosiSpiderDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'yqc_huoerguosi_spider.pipelines.YqcHuoerguosiSpiderPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"james@JamesUbuntu"
] |
james@JamesUbuntu
|
bdec9615902bc5bbbe3192556bcbc5bf9f5710d7
|
7f763d7c2289e0dcbcc01073f38ea11706736ed7
|
/HackerNews/plugin.py
|
27d9b8720aec36b4808d78d8e7003e3883d034b8
|
[] |
no_license
|
davidsedlar/LemongrabBot
|
ee3662a506dcbf6c6bfea0decd00044dd0e40ea9
|
37e18bc54554394ea3afa60fa168414e43ce0a99
|
refs/heads/master
| 2021-01-15T12:42:21.857132
| 2014-12-31T20:42:43
| 2014-12-31T20:42:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,080
|
py
|
import json
import urllib
import urllib2
import re
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
class HackerNews(callbacks.Plugin):
"""Add the help for "@plugin help HackerNews" here
This should describe *how* to use this plugin."""
threaded = True
def _shortenUrl(self, url):
posturi = "https://www.googleapis.com/urlshortener/v1/url"
headers = {'Content-Type' : 'application/json'}
data = {'longUrl' : url}
# if google news is up, safe to assume this is also up?
data = json.dumps(data)
request = urllib2.Request(posturi,data,headers)
response = urllib2.urlopen(request)
response_data = response.read()
shorturi = json.loads(response_data)['id']
return shorturi
# smart_truncate from http://stackoverflow.com/questions/250357/smart-truncate-in-python
def _smart_truncate(self, text, length, suffix='...'):
"""Truncates `text`, on a word boundary, as close to
the target length it can come.
"""
slen = len(suffix)
pattern = r'^(.{0,%d}\S)\s+\S+' % (length-slen-1)
if len(text) > length:
match = re.match(pattern, text)
if match:
length0 = match.end(0)
length1 = match.end(1)
if abs(length0+slen-length) < abs(length1+slen-length):
return match.group(0) + suffix
else:
return match.group(1) + suffix
return text
def hackernews(self, irc, msg, args, optlist):
"""[--newest|--latest|--best|--ask] type of headlines to display.
Display top hackernews.com headlines.
"""
hnposts = "latest"
#for (key, value) in optlist:
# if key == 'newest':
# hnposts = "newest"
# if key == 'latest':
# hnposts = "latest"
# if key == 'best':
# hnposts = "best"
# if key == 'ask':
# hnposts = "ask"
api_url = "http://hackernews-frontend.appspot.com/%s/format/json/limit/5" % hnposts
self.log.info(api_url)
response = urllib2.urlopen(api_url)
data = response.read().decode('latin-1')
jsondata = json.loads(data)
#self.log.info(json.dumps(jsondata, indent=2))
items = jsondata['items']
#entries = sorted(items, key=items['comments'], reverse=True)
for item in items:
title = item['title']
url = self._shortenUrl(item['url'])
score = item['score']
user = item['user']
comments = item['comments']
time = item['time']
item_id = item['item_id']
irc.reply(title + " " + url)
hackernews = wrap(hackernews, [getopts({'newest': '','latest': '','best': '','ask': ''})])
Class = HackerNews
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=200:
|
[
"smithers_mr@yahoo.com"
] |
smithers_mr@yahoo.com
|
39475ae26aaa932b43d254a8b348976f7b7b4d1b
|
c4a046a62e933d72d3404787429d0840517ae9bd
|
/sandbox/gkahn/gcg/envs/env_utils.py
|
4501126847636df8b691476716de4cc38ea5e34e
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
JasonTOKO/gcg
|
6d1cff2307e1bae6790357ea569ed4cca594eb1d
|
e48c5cf47bfbc879c9477a8c98b3b108d43413af
|
refs/heads/gcg_release
| 2020-04-29T02:30:28.133570
| 2017-12-26T17:06:14
| 2017-12-26T17:06:14
| 175,770,942
| 1
| 0
|
NOASSERTION
| 2019-03-15T07:32:36
| 2019-03-15T07:32:35
| null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
from rllab.misc.ext import set_seed
### environments
import gym
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.normalized_env import normalize
def create_env(env_str, is_normalize=True, seed=None):
from rllab.envs.gym_env import GymEnv, FixedIntervalVideoSchedule
from sandbox.gkahn.gcg.envs.rccar.square_env import SquareEnv
from sandbox.gkahn.gcg.envs.rccar.square_cluttered_env import SquareClutteredEnv
from sandbox.gkahn.gcg.envs.rccar.cylinder_env import CylinderEnv
inner_env = eval(env_str)
if is_normalize:
inner_env = normalize(inner_env)
env = TfEnv(inner_env)
# set seed
if seed is not None:
set_seed(seed)
if isinstance(inner_env, GymEnv):
inner_env.env.seed(seed)
return env
|
[
"gkahn13@gmail.com"
] |
gkahn13@gmail.com
|
0984a460f47fe467dc4b1b0a1b5591fed90dc568
|
db7a459e31c0a186dca64a829f93090fa58feab0
|
/ai_learning/data_structure/sort05_quick.py
|
1d18ed1f5e05491e090069d04ec5228d88187b18
|
[] |
no_license
|
ZouJoshua/dl_project
|
a3e7c9e035c37af698d4ef388fbb8c46174d5de1
|
ee7ecedd55ce544b127be8009e026ac2cdc3f71b
|
refs/heads/master
| 2022-12-04T04:21:19.937698
| 2022-01-27T07:33:37
| 2022-01-27T07:33:37
| 175,645,793
| 9
| 3
| null | 2022-11-21T21:30:23
| 2019-03-14T15:07:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 4/27/20 1:47 PM
@File : sort05_quick.py
@Desc   : Quick sort
"""
def quick_sort(alist, l, r):
    """
    Quick sort (in place)
    Average time complexity: O(n log n)
    Best-case time complexity: O(n log n)
    Worst-case time complexity: O(n^2)
    Not stable
    :param alist: list to sort in place
    :param l: left index of the range to sort
    :param r: right index of the range to sort
    :return: None
    """
    if l >= r:
        return
    mid_value = alist[l]
    left = l
    right = r
    while left < right:
        while left < right and alist[right] >= mid_value:
            right -= 1
        alist[left] = alist[right]
        while left < right and alist[left] < mid_value:
            left += 1
        alist[right] = alist[left]
    # when the loop exits, left and right point to the same position
    alist[left] = mid_value
    quick_sort(alist, l, left-1)
    quick_sort(alist, left+1, r)
def quick_sort_v1(alist, left, right):
    """
    Quick sort (partition-helper variant)
    Average time complexity: O(n log n)
    Best-case time complexity: O(n log n)
    Worst-case time complexity: O(n^2)
    Space complexity: O(log n) ~ O(n)
    Not stable
    :param alist: list to sort in place
    :param left: left index of the range to sort
    :param right: right index of the range to sort
    :return: the sorted list
    """
    def quick(alist, left, right):
        mid_value = alist[left]
        while left < right:
            while left < right and alist[right] >= mid_value:
                right -= 1
            alist[left] = alist[right]
            while left < right and alist[left] < mid_value:
                left += 1
            alist[right] = alist[left]
        # when the loop exits, left and right point to the same position
        alist[left] = mid_value
        return left
    if left < right:
        mid_index = quick(alist, left, right)
        quick_sort_v1(alist, left, mid_index-1)
        quick_sort_v1(alist, mid_index+1, right)
    return alist
if __name__ == "__main__":
a = [3, 42, 5, 1, 55, 23, 44, 54, 32, 8, 10]
quick_sort(a, 0, len(a)-1)
# s = quick_sort_v1(a, 0, len(a)-1)
print(a)
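A minimal sanity check, not part of the original file, cross-checking both variants against Python's built-in sorted():
# Hedged sketch (not in the original file): verify both variants against sorted().
import random

data = [random.randint(0, 99) for _ in range(30)]
expected = sorted(data)
in_place = list(data)
quick_sort(in_place, 0, len(in_place) - 1)
assert in_place == expected
assert quick_sort_v1(list(data), 0, len(data) - 1) == expected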
|
[
"joshua_zou@163.com"
] |
joshua_zou@163.com
|
a5a9e4a77f543308bc1b3f321af2c9e0d305c91a
|
275bc864a84723d6767207573017c7258d60370c
|
/Refinement_based_extraction/Training_Functions.py
|
61a9c272c05e1b2fde13837fa44901eace8ab83e
|
[
"MIT"
] |
permissive
|
DES-Lab/Extracting-FSM-From-RNNs
|
ccfa7286b3894fde9e30405fdd2089c54a0b650a
|
761b78aed155827b6bb6479daf17a144e7ec8560
|
refs/heads/master
| 2023-04-08T04:57:16.214037
| 2022-04-14T15:38:07
| 2022-04-14T15:38:07
| 357,165,036
| 15
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,688
|
py
|
from Refinement_based_extraction.Helper_Functions import n_words_of_length
def make_train_set_for_target(target,alphabet,lengths=None,max_train_samples_per_length=300,search_size_per_length=1000,provided_examples=None):
train_set = {}
if None is provided_examples:
provided_examples = []
if None is lengths:
lengths = list(range(15))+[15,20,25,30]
for l in lengths:
samples = [w for w in provided_examples if len(w)==l]
samples += n_words_of_length(search_size_per_length,l,alphabet)
pos = [w for w in samples if target(w)]
neg = [w for w in samples if not target(w)]
pos = pos[:int(max_train_samples_per_length/2)]
neg = neg[:int(max_train_samples_per_length/2)]
minority = min(len(pos),len(neg))
pos = pos[:minority+20]
neg = neg[:minority+20]
train_set.update({w:True for w in pos})
train_set.update({w:False for w in neg})
#print("made train set of size:",len(train_set),", of which positive examples:",
# len([w for w in train_set if train_set[w]==True]))
return train_set
#curriculum
def mixed_curriculum_train(rnn,train_set,outer_loops=3,stop_threshold=0.001,learning_rate=0.001,
length_epochs=5,random_batch_epochs=100,single_batch_epochs=100,random_batch_size=20, show = False):
lengths = sorted(list(set([len(w) for w in train_set])))
for _ in range(outer_loops):
for l in lengths:
training = {w:train_set[w] for w in train_set if len(w)==l}
if len(set([training[w] for w in training])) <= 1: #empty, or length with only one classification
continue
rnn.train_group(training,length_epochs,show=False,loss_every=20,stop_threshold=stop_threshold,
learning_rate=learning_rate,batch_size=None,print_time=False)
# all together but in batches
if rnn.finish_signal == rnn.train_group(train_set,random_batch_epochs,show=show,loss_every=20,
stop_threshold = stop_threshold,
learning_rate=learning_rate,
batch_size=random_batch_size,print_time=False):
break
# all together in one batch
if rnn.finish_signal == rnn.train_group(train_set,single_batch_epochs,show=show,loss_every=20,
stop_threshold = stop_threshold,
learning_rate=learning_rate,batch_size=None,print_time=False):
break
print("classification loss on last batch was:",rnn.all_losses[-1])
|
[
"edi.muskardin@silicon-austria.com"
] |
edi.muskardin@silicon-austria.com
|
53655fd4008f110145660387e8700896033eb634
|
08428ba80f90f73bbce19e5bd0f423a1b4d025d7
|
/src/project_requests/urls.py
|
0dc1335179831f78e8a61384807b73fa52521851
|
[] |
no_license
|
marcoverl/openstack-security-integrations
|
0d3afe093b361c548b65be9e405e10318d51c7cd
|
58c560885b007cf25444e552de17c0d6a5a0e716
|
refs/heads/master
| 2021-01-16T21:18:56.071490
| 2014-06-17T07:56:48
| 2014-06-17T07:56:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
from django.conf.urls.defaults import patterns
from django.conf.urls.defaults import url
from openstack_dashboard.dashboards.project.project_requests import views
prefix = 'openstack_dashboard.dashboards.project.project_requests.views'
urlpatterns = patterns(prefix,
url(r'^$', views.RequestView.as_view(), name='index'))
|
[
"paolo.andreetto@pd.infn.it"
] |
paolo.andreetto@pd.infn.it
|
603c1e8258762426c4676d2615db71ac122ca5f1
|
14ed6c8bf8f735bd08e7d9d3a06ab71b06335a82
|
/update.py
|
1d00a9614c9b3cef7b10fd6b2a83dd90174807ae
|
[] |
no_license
|
HackLB/garage_sales
|
8e9ef258c1fa5ec55e85e3819ce07f0ad077ae48
|
78b865ccbf40471ee45b4925b18da56968ee2f6c
|
refs/heads/master
| 2021-01-11T06:52:43.886156
| 2017-09-03T01:00:07
| 2017-09-03T01:00:07
| 72,356,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,596
|
py
|
#!/usr/bin/env python
import os, sys
import requests
from bs4 import BeautifulSoup
from pprint import pprint
import simplejson as json
import hashlib
from geopy.geocoders import Nominatim, GoogleV3
from geopy.exc import GeocoderTimedOut
with open('../secrets.json') as f:
secrets = json.load(f)
geolocator = GoogleV3(api_key=secrets['google_api_key'])
url = 'https://wwwbitprod1.longbeach.gov/GarageSalePermit/SearchByDate.aspx'
def getmd5(message):
"""
Returns MD5 hash of string passed to it.
"""
return hashlib.md5(message.encode('utf-8')).hexdigest()
def scrape_records():
"""
Extracts garage sale records from the city garage sale Web page,
then puts each record into a dictionary and returns a list of dictionaries.
"""
print('Getting garage sales data...')
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
rows = soup.find('table', {'class': 'DataWebControlStyle'}).find_all('tr')
records = []
for row in rows[1:]:
cells = row.find_all('td')
location = cells[0].string.strip()
dates = [cells[1].string.strip()]
record = {'location': location, 'dates': dates}
records.append(record)
pprint(record)
return records
def get_subdirectory(base_name):
"""
Takes the base filename and returns a path to a subdirectory, creating it if needed.
"""
sub_dir = os.path.join(data_path, base_name[-8:-6], base_name[-6:-4], base_name[-4:-2])
os.makedirs(sub_dir, exist_ok=True)
return sub_dir
def geocode(address_stub):
address = '{}, LONG BEACH, CA'.format(address_stub)
try:
location = geolocator.geocode(address, timeout=2)
if location:
return {"latitude": location.latitude, "longitude": location.longitude, "address": location.address}
else:
return None
except GeocoderTimedOut:
return geocode(address_stub)
def save_records(records):
"""
Saves records to individual JSON files.
Records are per-address. Each new garage sale for
a given address gets appended to its existing file.
Files are named and organized based on an MD5 of
the address.
"""
print('Saving garage sales data...')
for record in records:
location_hash = getmd5(record['location'])
file_name = '{}.json'.format(location_hash)
directory = get_subdirectory(location_hash)
path = os.path.join(directory, file_name)
if os.path.exists(path):
with open(path) as f:
existing_data = json.load(f)
if record['dates'][0] not in existing_data['dates']:
existing_data['dates'].extend(record['dates'])
with open(path, 'w') as f:
json.dump(existing_data, f, indent=4, ensure_ascii=False, sort_keys=True)
else:
geocoded_location = geocode(record['location'])
if geocoded_location:
record['coordinates'] = geocoded_location
with open(path, 'w') as f:
json.dump(record, f, indent=4, ensure_ascii=False, sort_keys=True)
if __name__ == "__main__":
repo_path = os.path.dirname(os.path.realpath(sys.argv[0])) # Path to current directory
data_path = os.path.join(repo_path, '_data') # Root path for record data
os.makedirs(data_path, exist_ok=True)
records = scrape_records() # Scrape garage sale records...
save_records(records) # Save the scraped records to JSON files...
|
[
"rogerhoward@mac.com"
] |
rogerhoward@mac.com
|
93c5bfc63f145f5c6a7b441e8bc989771e798849
|
7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a
|
/adspygoogle/dfp/zsi/v201010/LineItemCreativeAssociationService_services.py
|
a543d1f0478ee699edbcb3c5695e3c57cc42665a
|
[
"Apache-2.0"
] |
permissive
|
hockeyprincess/google-api-dfp-python
|
534519695ffd26341204eedda7a8b50648f12ea9
|
efa82a8d85cbdc90f030db9d168790c55bd8b12a
|
refs/heads/master
| 2021-01-10T10:01:09.445419
| 2011-04-14T18:25:38
| 2011-04-14T18:25:38
| 52,676,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,486
|
py
|
##################################################
# LineItemCreativeAssociationService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from LineItemCreativeAssociationService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class LineItemCreativeAssociationServiceLocator:
LineItemCreativeAssociationServiceInterface_address = "https://www.google.com:443/apis/ads/publisher/v201010/LineItemCreativeAssociationService"
def getLineItemCreativeAssociationServiceInterfaceAddress(self):
return LineItemCreativeAssociationServiceLocator.LineItemCreativeAssociationServiceInterface_address
def getLineItemCreativeAssociationServiceInterface(self, url=None, **kw):
return LineItemCreativeAssociationServiceSoapBindingSOAP(url or LineItemCreativeAssociationServiceLocator.LineItemCreativeAssociationServiceInterface_address, **kw)
# Methods
class LineItemCreativeAssociationServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# op: createLineItemCreativeAssociation
def createLineItemCreativeAssociation(self, request):
if isinstance(request, createLineItemCreativeAssociationRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(createLineItemCreativeAssociationResponse.typecode)
return response
# op: createLineItemCreativeAssociations
def createLineItemCreativeAssociations(self, request):
if isinstance(request, createLineItemCreativeAssociationsRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(createLineItemCreativeAssociationsResponse.typecode)
return response
# get: getLineItemCreativeAssociation
def getLineItemCreativeAssociation(self, request):
if isinstance(request, getLineItemCreativeAssociationRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getLineItemCreativeAssociationResponse.typecode)
return response
# get: getLineItemCreativeAssociationsByStatement
def getLineItemCreativeAssociationsByStatement(self, request):
if isinstance(request, getLineItemCreativeAssociationsByStatementRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getLineItemCreativeAssociationsByStatementResponse.typecode)
return response
# op: performLineItemCreativeAssociationAction
def performLineItemCreativeAssociationAction(self, request):
if isinstance(request, performLineItemCreativeAssociationActionRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(performLineItemCreativeAssociationActionResponse.typecode)
return response
# op: updateLineItemCreativeAssociation
def updateLineItemCreativeAssociation(self, request):
if isinstance(request, updateLineItemCreativeAssociationRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(updateLineItemCreativeAssociationResponse.typecode)
return response
# op: updateLineItemCreativeAssociations
def updateLineItemCreativeAssociations(self, request):
if isinstance(request, updateLineItemCreativeAssociationsRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(updateLineItemCreativeAssociationsResponse.typecode)
return response
createLineItemCreativeAssociationRequest = ns0.createLineItemCreativeAssociation_Dec().pyclass
createLineItemCreativeAssociationResponse = ns0.createLineItemCreativeAssociationResponse_Dec().pyclass
createLineItemCreativeAssociationsRequest = ns0.createLineItemCreativeAssociations_Dec().pyclass
createLineItemCreativeAssociationsResponse = ns0.createLineItemCreativeAssociationsResponse_Dec().pyclass
getLineItemCreativeAssociationRequest = ns0.getLineItemCreativeAssociation_Dec().pyclass
getLineItemCreativeAssociationResponse = ns0.getLineItemCreativeAssociationResponse_Dec().pyclass
getLineItemCreativeAssociationsByStatementRequest = ns0.getLineItemCreativeAssociationsByStatement_Dec().pyclass
getLineItemCreativeAssociationsByStatementResponse = ns0.getLineItemCreativeAssociationsByStatementResponse_Dec().pyclass
performLineItemCreativeAssociationActionRequest = ns0.performLineItemCreativeAssociationAction_Dec().pyclass
performLineItemCreativeAssociationActionResponse = ns0.performLineItemCreativeAssociationActionResponse_Dec().pyclass
updateLineItemCreativeAssociationRequest = ns0.updateLineItemCreativeAssociation_Dec().pyclass
updateLineItemCreativeAssociationResponse = ns0.updateLineItemCreativeAssociationResponse_Dec().pyclass
updateLineItemCreativeAssociationsRequest = ns0.updateLineItemCreativeAssociations_Dec().pyclass
updateLineItemCreativeAssociationsResponse = ns0.updateLineItemCreativeAssociationsResponse_Dec().pyclass
|
[
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] |
api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138
|
dadb74301dd4de7542922d6102444f2aa9f68ad5
|
dbeb1e145eba012a200073038d8a8965ae0c6f5d
|
/Visualize/surface_3d.py
|
167ea594250b9f07a7afab12a5d1e0b9efa58eff
|
[] |
no_license
|
hellJane/Python_DataAnalysis
|
b7027cb9d8e75a98b5626a58ee85b64f62c54c9c
|
966ee5d732e074e9d124333f13d3e3e23ade1edc
|
refs/heads/master
| 2021-05-17T01:57:24.092791
| 2017-12-01T15:32:32
| 2017-12-01T15:32:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,397
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
'''
This function is used a lot, so here are the meanings of its common parameters:
X, Y, Z: the sample-point data; as with contour(), these must all be 2D arrays, i.e. produced by meshgrid
rstride: row stride used when drawing the surface
cstride: column stride used when drawing the surface
rcount: upper limit on the number of rows drawn for the surface
ccount: upper limit on the number of columns drawn for the surface
color: a single surface color
cmap: a colormap for graded coloring
....the remaining, rarely used parameters are not listed here
'''
fig = plt.figure()
ax = fig.gca(projection='3d')  # another way, besides add_subplot(111, projection='3d')
# Generate Data
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)  # must be a meshgrid
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)  # X, Y and Z are all 2D arrays
# surf = ax.plot_surface(X, Y, Z, color='r')  # the whole surface is red
# surf = ax.plot_surface(X, Y, Z, cmap='jet')  # surface colored by the cmap; roughly, the higher the Z value, the warmer the color
surf = ax.plot_surface(X, Y, Z, cmap='jet', linewidth=0, antialiased=False)  # erase the boundary lines between the small facets
ax.set_zlim(-1.01, 1.01)
ax.zaxis.set_major_locator(LinearLocator(10))  # exactly 10 ticks evenly dividing the zlim range
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))  # keep two decimal places on each tick
plt.show()
|
[
"noreply@github.com"
] |
hellJane.noreply@github.com
|
2ec6b6118058d57111beec5e02205c2fd1ddff50
|
5cc8c3690f2398698d78800734f7d1ba5dc5a515
|
/notebooks/loader.py
|
d2725ef5eb97333649f1520c70fcefb1cbf8eed4
|
[
"MIT"
] |
permissive
|
kaiyingshan/ode-solver
|
0094f06e1bb9f265517b4befec0c04bd3a9d9407
|
30c6798efe9c35a088b2c6043493470701641042
|
refs/heads/master
| 2020-04-23T00:39:22.148183
| 2019-05-07T17:35:34
| 2019-05-07T17:35:34
| 170,787,367
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
|
[
"hanzhi713@163.com"
] |
hanzhi713@163.com
|
6bc942a94a76a3549acab8a58b09d349fd303f10
|
8ae07790f074439a329f55f3ed3408e2ba775a74
|
/Desktop/packages/rmutil/UnixDriveDetector.py
|
6097e90559c3ec61aeda0a70120d58db5927bc00
|
[
"Apache-2.0"
] |
permissive
|
peter9teufel/usb-kiosk
|
cf06bc7c612b99860f8e1109a293c24e10e7a016
|
be931bfe79636e8280748e06d29a4c86af423478
|
refs/heads/master
| 2020-04-06T04:10:04.711126
| 2015-02-20T11:43:38
| 2015-02-20T11:43:38
| 21,199,497
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,160
|
py
|
import threading, sys, os, time, platform, getpass
import wx
if platform.system() == "Linux":
from wx.lib.pubsub import setupkwargs
from wx.lib.pubsub import pub as Publisher
else:
from wx.lib.pubsub import pub as Publisher
if platform.system() == "Linux":
if 'fedora' in platform.dist():
user = getpass.getuser()
VOLUMES_PATH = "/run/media/" + user
else:
VOLUMES_PATH = "/media"
else:
VOLUMES_PATH = "/Volumes"
bg_thread = None
runFlag = True
volumes = None
def waitForUSBDrive():
# load current list of volumes
global volumes
volumes = os.listdir(VOLUMES_PATH)
global bg_thread
bg_thread = BackgroundUSBDetection()
bg_thread.daemon = True
bg_thread.start()
bg_thread.join()
# RESULT CALL --> wx.CallAfter(Publisher.sendMessage, 'usb_connected', path=drive_path)
### THREAD FOR ASYNC USB DETECTION ###
class BackgroundUSBDetection(threading.Thread):
def __init__(self):
self.run_event = threading.Event()
threading.Thread.__init__(self, name="Mac_Drive_Detector")
def run(self):
print "Thread started..."
global runFlag, volumes
tries = 0
while runFlag and tries < 10:
# check volumes
curVols = os.listdir(VOLUMES_PATH)
newVol = self.NewVolumes(volumes, curVols)
# update list of volumes in case a volume was disconnected (e.g. retry plugging USB)
volumes = curVols
if len(newVol) > 0:
wx.CallAfter(Publisher.sendMessage, 'usb_connected', path=VOLUMES_PATH + '/' + newVol[0])
runFlag = False
time.sleep(2)
tries += 1
if tries == 10:
# not found --> send timout message
wx.CallAfter(Publisher.sendMessage, 'usb_search_timeout')
def NewVolumes(self, oldVolumes, curVolumes):
newVol = []
for volume in curVolumes:
if not volume in oldVolumes:
newVol.append(volume)
return newVol
if __name__=='__main__':
# load current list of volumes
volumes = os.listdir(VOLUMES_PATH)
waitForUSBDrive()
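# Example (sketch): how a wx listener elsewhere in the application might consume the
# messages published above. Only the topic names 'usb_connected' (with a 'path'
# keyword) and 'usb_search_timeout' come from this module; the handler names are
# hypothetical.
def example_subscribe_usb_events():
    def on_usb_connected(path):
        print "USB drive mounted at %s" % path
    def on_usb_timeout():
        print "no USB drive found before the search timed out"
    Publisher.subscribe(on_usb_connected, 'usb_connected')
    Publisher.subscribe(on_usb_timeout, 'usb_search_timeout')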
|
[
"peter9teufel@gmail.com"
] |
peter9teufel@gmail.com
|
9042aa99583f972e7c0f07daa53deb9a89199f8c
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2020_07_01_preview/_policy_client.py
|
581050251e24acfacfc172e0aca18058b3633016
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,057
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import PolicyClientConfiguration
from .operations import PolicyExemptionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class PolicyClient: # pylint: disable=client-accepts-api-version-keyword
"""To exempt your resources from policy evaluation and non-compliance state, you can create an
exemption at a scope.
:ivar policy_exemptions: PolicyExemptionsOperations operations
:vartype policy_exemptions:
azure.mgmt.resource.policy.v2020_07_01_preview.operations.PolicyExemptionsOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.policy_exemptions = PolicyExemptionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "PolicyClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
6757af7cac4bdda23747b34a7f1f13a843f4ffea
|
8ce656578e04369cea75c81b529b977fb1d58d94
|
/bank_guarantee/migrations/0037_auto_20200221_1322.py
|
f40f4f6609c6fc8c2adbea5af7ea5f848ca4d2e0
|
[] |
no_license
|
JJvzd/django_exp
|
f9a08c40a6a7535777a8b5005daafe581d8fe1dc
|
b1df4681e67aad49a1ce6426682df66b81465cb6
|
refs/heads/master
| 2023-05-31T13:21:24.178394
| 2021-06-22T10:19:43
| 2021-06-22T10:19:43
| 379,227,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
# Generated by Django 2.1.7 on 2020-02-21 10:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bank_guarantee', '0036_auto_20200131_0005'),
]
operations = [
migrations.AlterField(
model_name='requeststatus',
name='code',
field=models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='Код'),
),
]
|
[
"javad@MacBook-Pro-Namig.local"
] |
javad@MacBook-Pro-Namig.local
|
938b1412931f54fefe25078052ee0bc92effebf3
|
9cd180fc7594eb018c41f0bf0b54548741fd33ba
|
/sdk/python/pulumi_azure_nextgen/storage/v20181101/get_blob_container_immutability_policy.py
|
ec61eb93d9aab5985bfca2abc2f0d65f0e8c4e2e
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
MisinformedDNA/pulumi-azure-nextgen
|
c71971359450d03f13a53645171f621e200fe82d
|
f0022686b655c2b0744a9f47915aadaa183eed3b
|
refs/heads/master
| 2022-12-17T22:27:37.916546
| 2020-09-28T16:03:59
| 2020-09-28T16:03:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,604
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetBlobContainerImmutabilityPolicyResult',
'AwaitableGetBlobContainerImmutabilityPolicyResult',
'get_blob_container_immutability_policy',
]
@pulumi.output_type
class GetBlobContainerImmutabilityPolicyResult:
"""
The ImmutabilityPolicy property of a blob container, including Id, resource name, resource type, Etag.
"""
def __init__(__self__, etag=None, immutability_period_since_creation_in_days=None, name=None, state=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if immutability_period_since_creation_in_days and not isinstance(immutability_period_since_creation_in_days, int):
raise TypeError("Expected argument 'immutability_period_since_creation_in_days' to be a int")
pulumi.set(__self__, "immutability_period_since_creation_in_days", immutability_period_since_creation_in_days)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> str:
"""
Resource Etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="immutabilityPeriodSinceCreationInDays")
def immutability_period_since_creation_in_days(self) -> int:
"""
The immutability period for the blobs in the container since the policy creation, in days.
"""
return pulumi.get(self, "immutability_period_since_creation_in_days")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
The ImmutabilityPolicy state of a blob container, possible values include: Locked and Unlocked.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. Ex- Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
"""
return pulumi.get(self, "type")
class AwaitableGetBlobContainerImmutabilityPolicyResult(GetBlobContainerImmutabilityPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBlobContainerImmutabilityPolicyResult(
etag=self.etag,
immutability_period_since_creation_in_days=self.immutability_period_since_creation_in_days,
name=self.name,
state=self.state,
type=self.type)
def get_blob_container_immutability_policy(account_name: Optional[str] = None,
container_name: Optional[str] = None,
immutability_policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBlobContainerImmutabilityPolicyResult:
"""
Use this data source to access information about an existing resource.
:param str account_name: The name of the storage account within the specified resource group. Storage account names must be between 3 and 24 characters in length and use numbers and lower-case letters only.
:param str container_name: The name of the blob container within the specified storage account. Blob container names must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter or number.
:param str immutability_policy_name: The name of the blob container immutabilityPolicy within the specified storage account. ImmutabilityPolicy Name must be 'default'
:param str resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['immutabilityPolicyName'] = immutability_policy_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:storage/v20181101:getBlobContainerImmutabilityPolicy', __args__, opts=opts, typ=GetBlobContainerImmutabilityPolicyResult).value
return AwaitableGetBlobContainerImmutabilityPolicyResult(
etag=__ret__.etag,
immutability_period_since_creation_in_days=__ret__.immutability_period_since_creation_in_days,
name=__ret__.name,
state=__ret__.state,
type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
29063ff6540ddec2aa06c9e35f1bc7a3b64d8b2e
|
8a3e7b779676e396853dc1fb22525e501050cffb
|
/geoist/vis/gui.py
|
118bd9e8fa0d14501a4eb9f0a7269d00960258ab
|
[
"MIT"
] |
permissive
|
CHEN-Zhaohui/geoist
|
3a8218105b8bd21d23f3e15e3d20397adf8f571d
|
06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b
|
refs/heads/master
| 2021-03-31T19:19:04.472355
| 2020-03-18T03:18:04
| 2020-03-18T03:18:04
| 248,126,521
| 0
| 0
|
MIT
| 2020-03-18T03:07:54
| 2020-03-18T03:07:53
| null |
UTF-8
|
Python
| false
| false
| 4,325
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 10 18:25:58 2019
@author: chens
"""
from tkinter import *
import os
#from simpledialog import simpledialog
import gimodule
gimodule.maxwidth = 140
# Since the interface now has two columns of buttons this must be wider
## Constants
programname = "MagTools APIs - Geomagnetic reference field models"
version = "0.1"
## Starting the program and creating classes
class App:
def __init__(self, master):
frame = Frame(master)
frame.grid()
mainLabel = gimodule.mainline(frame, "定向钻探(NWD)参考地磁场模型计算接口程序", version)
gimodule.seperator_line(frame,10, "获得地磁场信息")
covfit = gimodule.LauncherButton(frame,11,18,0,"最近地磁台",
lambda:[covfit.run("covfit.py")], "API: http://0.0.0.0/magv1/nearestMagSta?")
empcov = gimodule.LauncherButton(frame,11,18,1,"指定范围地磁台", lambda:
[empcov.run("empcov.py")], "API: http://0.0.0.0/magv1/selMagSta?")
geocol = gimodule.LauncherButton(frame,12,18,0,"地磁分量转换", lambda:
[geocol.run("geocol.py")], "API: xyz2hdi/hdi2xyz")
tc = gimodule.LauncherButton(frame,12,18,1,"模型解算最优时变", lambda:
[tc.run("tc.py")], "API: magts")
gimodule.seperator_line(frame,40, "主磁场+岩石圈磁场:EMM2015地磁模型接口")
geogrid = gimodule.LauncherButton(frame,41,18,0,"模型解算单点",
lambda: [geogrid.run("geogrid.py")], "API: emmpnt ")
geoip = gimodule.LauncherButton(frame,41,18,1,"模型解算网格", lambda:
[geoip.run("geoip.py")], "API: emmgrd")
geoegm = gimodule.LauncherButton(frame,42,18,0,"模型解算时间序列",
lambda:[geoegm.run("geoegm.py")], "API: emmts")
stokes = gimodule.LauncherButton(frame,42,18,1,"模型解算多点", lambda:
[stokes.run("stokes.py")], "API: emmpnts")
gimodule.seperator_line(frame,70, "主磁场1:IGRF12地磁模型接口")
geogrid = gimodule.LauncherButton(frame,71,18,0,"模型解算单点",
lambda: [geogrid.run("geogrid.py")], "API: igrfpnt ")
geoip = gimodule.LauncherButton(frame,71,18,1,"模型解算网格", lambda:
[geoip.run("geoip.py")], "API: igrfgrd")
geoegm = gimodule.LauncherButton(frame,72,18,0,"模型解算时间序列",
lambda:[geoegm.run("geoegm.py")], "API: igrfts")
stokes = gimodule.LauncherButton(frame,72,18,1,"模型解算多点", lambda:
[stokes.run("stokes.py")], "API: igrfpnts")
gimodule.seperator_line(frame,100, "主磁场2:WMM2015地磁模型接口")
geogrid = gimodule.LauncherButton(frame,101,18,0,"模型解算单点",
lambda: [geogrid.run("geogrid.py")], "API: wmmpnt ")
geoip = gimodule.LauncherButton(frame,101,18,1,"模型解算网格", lambda:
[geoip.run("geoip.py")], "API: wmmgrd")
geoegm = gimodule.LauncherButton(frame,102,18,0,"模型解算时间序列",
lambda:[geoegm.run("geoegm.py")], "API: wmmts")
stokes = gimodule.LauncherButton(frame,102,18,1,"模型解算多点", lambda:
[stokes.run("stokes.py")], "API: wmmpnts")
gimodule.seperator_line(frame,130, "电离层磁场:DIFI-4地磁模型接口")
geogrid = gimodule.LauncherButton(frame,131,18,0,"模型解算单点",
lambda: [geogrid.run("geogrid.py")], "API: difipnt ")
geoip = gimodule.LauncherButton(frame,131,18,1,"模型解算网格", lambda:
[geoip.run("geoip.py")], "API: difigrd")
geoegm = gimodule.LauncherButton(frame,132,18,0,"模型解算时间序列",
lambda:[geoegm.run("geoegm.py")], "API: dififts")
stokes = gimodule.LauncherButton(frame,132,18,1,"模型解算多点", lambda:
[stokes.run("stokes.py")], "API: difipnts")
gimodule.seperator_line(frame,gimodule.maxrow-2)
button = Button(frame, text="退出", width=8, command=frame.quit)
button.grid(row=gimodule.maxrow, column=0, sticky=W)
######################################################
## Initiate the program and start program loop
######################################################
root = Tk()
app = App(root)
root.title(programname)
root.mainloop()
|
[
"chenshi@cea-igp.ac.cn"
] |
chenshi@cea-igp.ac.cn
|
d345d8aabf75c5430dee2cf383acce3e60f7f9b0
|
b85ee3f4d308a3e0022938379fbcac3186d789bc
|
/hanishbot.py
|
fb412df1ad3c1929b3f3174963a995c346d7dc27
|
[
"MIT"
] |
permissive
|
waffle-iron/hanish
|
7f007f8e13237dea85a2c654d811836f0896f3e0
|
472afcb70ffe92517412c4211e6d44b51117922f
|
refs/heads/master
| 2021-01-20T14:39:59.114026
| 2017-05-08T15:32:25
| 2017-05-08T15:32:25
| 90,644,193
| 0
| 0
| null | 2017-05-08T15:32:24
| 2017-05-08T15:32:24
| null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
#!/usr/bin/env python
# hanishbot
# Primary entry point and application runner for the Hanish bot.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Mon May 08 11:15:40 2017 -0400
#
# Copyright (C) 2016 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: hanishbot.py [] benjamin@bengfort.com $
"""
Primary entry point and application runner for the Hanish bot.
"""
##########################################################################
## Imports
##########################################################################
import dotenv
import hanish
import argparse
##########################################################################
## Command Arguments
##########################################################################
LOAD_DOTENV = True
DESCRIPTION = "A slackbot that makes small talk about the weather."
VERSION = "hanishbot v{}".format(hanish.get_version())
EPILOG = "Please report bugs or issues to github.com/bbengfort/hanish"
##########################################################################
## Main Method
##########################################################################
if __name__ == '__main__':
# Load the environment from the .env file
if LOAD_DOTENV:
dotenv.load_dotenv(dotenv.find_dotenv())
# Create the command line argument parser
    parser = argparse.ArgumentParser(
        description=DESCRIPTION, epilog=EPILOG,
    )
    # argparse no longer accepts a 'version' keyword in Python 3, so expose it as an action
    parser.add_argument('--version', action='version', version=VERSION)
# Parse the arguments and execute the command
args = parser.parse_args()
try:
print("stub implementation")
parser.exit(0)
except Exception as e:
parser.error(str(e))
|
[
"benjamin@bengfort.com"
] |
benjamin@bengfort.com
|
57e75f26ee80bfea08e6b5076b340fb659ece915
|
f8ad6963bfc851657ea50c6a036cfad29cdd7f60
|
/Books/GodOfPython/P00_OriginalSource/ch16/ThreadServer.py
|
dab8cdf02274a063c8c05da2430fcaa1f42a73b4
|
[] |
no_license
|
foru120/PythonRepository
|
e1ab0265c0f50ef2e9acdf7447237c913560692b
|
db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98
|
refs/heads/master
| 2021-01-01T06:53:11.728109
| 2019-04-25T13:52:50
| 2019-04-25T13:52:50
| 97,541,222
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
#ThreadServer.py
from socket import *
import threading  # imported to use threads
sevip = '127.0.0.1'
sevport = 62581
address = (sevip, sevport)
sevsock = socket(AF_INET, SOCK_STREAM)
sevsock.bind(address)
sevsock.listen()
print("waiting for connection...")
clisock , cliaddr = sevsock.accept()
print("connection from {}".format(cliaddr))
print("If you want to leave chat, just type !quit\n")
# define the function containing the code that will run in the thread
def receive():
global clisock
while True:
data = clisock.recv(1024)
print(data.decode("UTF-8"), " *from Client")
clisock.close()
thread_recv = threading.Thread(target = receive, args = ())  # create the thread
thread_recv.start()  # start the thread
while True:
try:
data = input("")
except KeyboardInterrupt:
break
    if data == '!quit' or data == '':  # typing !quit ends the while loop
clisock.close()
break
clisock.send(bytes(data,"UTF-8"))
sevsock.close()
print("disconnected")
|
[
"broodsky1122@hanmail.net"
] |
broodsky1122@hanmail.net
|
530f3e39a4f1874f054c822946fcc627683d1f44
|
ee730a381ef8113efedc62541f576393180b8f3e
|
/study/exceltest.py
|
11394000d715ae7256d135d624944609d047e5b1
|
[] |
no_license
|
happy789450/python
|
b17357232cfb004757b82cb880ddb5f208aec443
|
05de30bf82db6a93e93a93c5158a6d23a7fb2390
|
refs/heads/master
| 2023-04-11T10:33:26.340610
| 2021-04-24T05:52:56
| 2021-04-24T05:52:56
| 348,461,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
import datetime
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
ws['A2'] = 1
# ws.append([1, 2, 3])
# ws.append([2, 2, 3])
# ws['A2'] = datetime.datetime.now()
# ws.append([datetime.datetime.now()])
# ws['A3'] = 666
# ws['A5'] = 'aaa'
wb.save("/home/rice/桌面/test/sample.xlsx")
|
[
"you@example.com"
] |
you@example.com
|
01713593bf73d9c8417c6c76a052d1b48e4e8f08
|
e98e7b45d85273797cf9f15e92fbe685a05bde18
|
/编码/0091.py
|
225393e7141d14cd27e7bed8e70a2df295c04195
|
[] |
no_license
|
wangdexinpython/test
|
8d29d30e099f64f831b51265db7092d520df253c
|
a047148409e31b8a8140f2c13b959aa54ec14d0d
|
refs/heads/master
| 2020-09-11T05:10:49.041795
| 2019-12-31T07:47:41
| 2019-12-31T07:47:41
| 221,948,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
import requests,re,json
headers={
"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
url='https://www.haodf.com/doctor/DE4rO-XCoLUXxbXYw1mnSzFYx4.htm'
cons =requests.get(url,headers=headers).text
c1 = re.findall(r'<div id=\\"truncate_DoctorSpecialize\\" style=\\".*?;\\">(.*?)<\\/div>',cons)[0].replace(' ','').replace('\t','')
print(c1)
ss = c1.encode('utf-8').decode('unicode_escape').replace('\s','')
# sss =
# ss=json.loads(c1,encoding='unicode_escape')
print(ss)
|
[
"wangdexin@haxitag.com"
] |
wangdexin@haxitag.com
|
da1ea66272f627d41439ddc8369681fc1650cce4
|
6ac2c27121d965babbb4bcbc7c479c26bf60bdf5
|
/tests/node/Inequality.py
|
4eff81390322439b36c26cdc460d6162687b6a98
|
[
"MIT"
] |
permissive
|
Gawaboumga/PyMatex
|
5a2e18c3e17d3b76e814492f7e2ca63a57d720e9
|
3ccc0aa23211a064aa31a9b509b108cd606a4992
|
refs/heads/master
| 2020-03-28T01:40:32.341723
| 2018-12-20T13:49:12
| 2018-12-20T13:49:12
| 147,521,693
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
from tests import BaseTest
from pymatex.node import *
class InequalityTests(BaseTest.BaseTest):
def test_read_simple_inequality(self):
ast = self.parse(r'\sum_{i < j} i')
i = Variable('i')
self.assertEqual(ast, InequalitySummation(Inequality(i, Variable('j'), '<'), None, i))
def test_read_simple_inequality_with_constant(self):
ast = self.parse(r'\sum_{0 < j} i')
i = Variable('i')
self.assertEqual(ast, InequalitySummation(Inequality(Constant('0'), Variable('j'), '<'), None, i))
def test_read_multi_inequality(self):
ast = self.parse(r'\sum_{0 < i \leq j} i')
i = Variable('i')
self.assertEqual(ast, InequalitySummation(Inequality(Inequality(Constant('0'), i, '<'), Variable('j'), '\\leq'), None, i))
|
[
"yourihubaut@hotmail.com"
] |
yourihubaut@hotmail.com
|
a12389d70e412c4f3fcde17263755ac7d5be0efd
|
0d279444c768d10a7ee6c6ec26323947e05cfd01
|
/backend/delivery_user_profile/migrations/0001_initial.py
|
7a1b9a3c600b8b9ce8ef57537def7a338ad0b4da
|
[] |
no_license
|
crowdbotics-apps/listsaver-24496
|
e3574e92b092360d724147c78586142e07e98b7a
|
d8636cee59504dac335bc035fc890f80631de3b5
|
refs/heads/master
| 2023-03-05T08:15:13.712454
| 2021-02-13T13:56:15
| 2021-02-13T13:56:15
| 338,584,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
# Generated by Django 2.2.18 on 2021-02-13 13:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.URLField()),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ContactInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('phone', models.CharField(max_length=20)),
('address', models.TextField()),
('is_default', models.BooleanField()),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contactinfo_profile', to='delivery_user_profile.Profile')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
44e44c6662496b3b74ca005acce2cc1e123d70e8
|
11f4852e9af2eeb5d238ddd7df9ccd713160a334
|
/train.py
|
0ee97b96ea9a28f250231624bba0f5cc8088e39b
|
[] |
no_license
|
chw0806-github/nucleus_detection
|
d174f6c924213a0c09b7c6f1990873f8819251ff
|
a53b10965b2963922b7c266bb93ad4cbe2906db0
|
refs/heads/master
| 2021-04-08T16:26:47.884953
| 2018-05-02T04:06:43
| 2018-05-02T04:06:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
#!/usr/bin/env python
# coding=utf-8
"""
python=3.5.2
"""
from data_input import read_train_data, read_test_data, prob_to_rles, mask_to_rle, resize, np
from model import get_unet, dice_coef
import pandas as pd
from post_process import post_processing
from skimage.io import imshow
import matplotlib.pyplot as plt
from keras.models import load_model
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
epochs = 50
model_name = 'model-0416-bn.h5'
# best_model_name = 'model-dsbowl2018-0416-best.h5'
# get train_data
train_img, train_mask = read_train_data()
# get test_data
test_img, test_img_sizes = read_test_data()
# get u_net model
u_net = get_unet()
# fit model on train_data
print("\n Training...")
tb = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
# early_stopper = EarlyStopping(patience=5, verbose=1)
# check_pointer = ModelCheckpoint(best_model_name, verbose=1, save_best_only=True)
u_net.fit(train_img, train_mask, batch_size=16, epochs=epochs, callbacks=[tb])
print("\n Saving")
u_net.save(model_name)
print("\n load model")
u_net = load_model(model_name, custom_objects={'dice_coef': dice_coef})
print("\n Predicting and Saving predict")
# Predict on test data
test_mask = u_net.predict(test_img, verbose=1)
np.save("test_img_bn_pred", test_mask)
|
[
"993001803@qq.com"
] |
993001803@qq.com
|
63ef1df5231e708ee0376612469a1cf795a19d00
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/Algorithms in Python Live Coding & Design Techniques/src/005_dynamic-programming/staircase/Staircase.py
|
23c2f71d354f0cfe8b1116f091b4108956fea056
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 835
|
py
|
#Recursive Approach
def ways(n, k):
if n == 0:
return 1
if n < 0:
return 0
ans = 0
for i in range(1, k+1):
ans += ways(n-i, k)
return ans
#Dynamic Programming : Top Down Approach
def ways_top_down(n, k, dp):
if n == 0:
dp[n] = 1
return dp[n]
if n < 0:
return 0
if not dp[n] == -1:
return dp[n]
dp[n] = 0
for i in range(1, k+1):
dp[n] += ways_top_down(n-i, k, dp)
return dp[n]
#Dynamic Programming : Bottom Up Approach
def ways_bottom_up(n, k):
dp = [0] * (n+1)
dp[0] = 1
for step in range(1, n+1):
dp[step] = 0
for j in range(1, k+1):
if step - j >= 0:
dp[step] += dp[step - j]
return dp[n]
n = 4
steps = 3
dp = [-1]*(n+1)
print(ways(n, steps))
print(ways_top_down(n, steps, dp))
print(ways_bottom_up(n, steps))
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
de2a2f46c04b3533461e9d8caf3a4a4d4aa0077b
|
bca9c2fa3c4c3d06dd612280ce39090a9dfab9bd
|
/neekanee/job_scrapers/plugins/com/link/verivo.py
|
42dac15cdc1dc889d2fb09f4ea617b24d30a63a9
|
[] |
no_license
|
thayton/neekanee
|
0890dd5e5cf5bf855d4867ae02de6554291dc349
|
f2b2a13e584469d982f7cc20b49a9b19fed8942d
|
refs/heads/master
| 2021-03-27T11:10:07.633264
| 2018-07-13T14:19:30
| 2018-07-13T14:19:30
| 11,584,212
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
import re, mechanize, urlparse
from neekanee.jobscrapers.jobscraper import JobScraper
from neekanee.htmlparse.soupify import soupify, get_all_text, get_mailto
from neekanee_solr.models import *
COMPANY = {
'name': 'Verivo',
'hq': 'Waltham, MA',
'home_page_url': 'http://www.verivo.com',
'jobs_page_url': 'http://www.verivo.com/about-us/careers/',
'empcnt': [51,200]
}
class VerivoJobScraper(JobScraper):
def __init__(self):
super(VerivoJobScraper, self).__init__(COMPANY)
def scrape_job_links(self, url):
jobs = []
self.br.open(url)
s = soupify(self.br.response().read())
x = {'title': 'Careers', 'href': self.company.jobs_page_url}
a = s.find('a', attrs=x)
x = {'class': 'children'}
u = a.findNext('ul', attrs=x)
r1 = re.compile(r'/about-us/careers/[a-z-]+/$')
r2 = re.compile(r'/careers/[a-z-]+/$')
for a in u.findAll('a', href=r1):
l = urlparse.urljoin(self.br.geturl(), a['href'])
self.br.open(l)
s = soupify(self.br.response().read())
x = {'class': 'list-posts-wrapper'}
d = s.find('div', attrs=x)
if not d:
continue
for a in d.findAll('a', href=r2):
s = soupify(self.br.response().read())
title = a.text.lower().strip()
if title.startswith('read more'):
continue
job = Job(company=self.company)
job.title = a.text
job.url = urlparse.urljoin(self.br.geturl(), a['href'])
job.location = self.company.location
jobs.append(job)
self.br.back()
return jobs
def scrape_jobs(self):
job_list = self.scrape_job_links(self.company.jobs_page_url)
self.prune_unlisted_jobs(job_list)
new_jobs = self.new_job_listings(job_list)
for job in new_jobs:
self.br.open(job.url)
s = soupify(self.br.response().read())
d = s.find('div', attrs={'class': 'site-content'})
job.desc = get_all_text(d)
job.save()
def get_scraper():
return VerivoJobScraper()
if __name__ == '__main__':
job_scraper = get_scraper()
job_scraper.scrape_jobs()
|
[
"thayton@neekanee.com"
] |
thayton@neekanee.com
|
034dbf6920b0dea17131a2d0fe980a11803de766
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/ptp1b_input/L86/86-84_MD_NVT_rerun/set.py
|
8869714c49649943cc15437ffa3f1f2ed05321c6
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import os
dir = '/mnt/scratch/songlin3/run/ptp1b/L86/MD/ti_one-step/86_84/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#prodin
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../86-84_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
7b15765d3b5355212d948031d49b1d902c481e39
|
e70e8f9f5c1b20fe36feab42ad4c2c34fc094069
|
/Python/Advanced OOP/Problems/09. Programmer.py
|
19550c44940b74453ea18b29b992c9fbee136945
|
[
"MIT"
] |
permissive
|
teodoramilcheva/softuni-software-engineering
|
9247ca2032915d8614017a3762d3752b3e300f37
|
98dc9faa66f42570f6538fd7ef186d2bd1d39bff
|
refs/heads/main
| 2023-03-29T15:55:54.451641
| 2021-04-09T18:46:32
| 2021-04-09T18:46:32
| 333,551,625
| 0
| 0
| null | 2021-04-09T18:46:32
| 2021-01-27T20:30:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
class Programmer:
def __init__(self, name: str, language: str, skills: int):
self.name = name
self.language = language
self.skills = skills
def watch_course(self, course_name, language, skills_earned):
if self.language == language:
self.skills += skills_earned
return f'{self.name} watched {course_name}'
else:
return f'{self.name} does not know {language}'
def change_language(self, new_language, skills_needed):
if self.skills >= skills_needed:
if self.language != new_language:
previous_language = self.language
self.language = new_language
return f'{self.name} switched from {previous_language} to {new_language}'
else:
return f'{self.name} already knows {self.language}'
else:
needed_skills = skills_needed - self.skills
return f'{self.name} needs {needed_skills} more skills'
programmer = Programmer("John", "Java", 50)
print(programmer.watch_course("Python Masterclass", "Python", 84))
print(programmer.change_language("Java", 30))
print(programmer.change_language("Python", 100))
print(programmer.watch_course("Java: zero to hero", "Java", 50))
print(programmer.change_language("Python", 100))
print(programmer.watch_course("Python Masterclass", "Python", 84))
|
[
"noreply@github.com"
] |
teodoramilcheva.noreply@github.com
|
9bb70d70f0b770a991f5cd4cae1d17455809d883
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/XtcExplorer/tags/V00-01-05/src/pyana_ipimb.py
|
7ee053a8e5780fb49084fe992c18bc46ada427fb
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026
| 2015-09-03T22:22:11
| 2015-09-03T22:22:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,555
|
py
|
#
# ipimb.py: plot beamline data
#
#
import numpy as np
import matplotlib.pyplot as plt
from pypdsdata import xtc
from utilities import PyanaOptions
from utilities import IpimbData
# analysis class declaration
class pyana_ipimb ( object ) :
def __init__ ( self,
sources = None,
plot_every_n = "0",
accumulate_n = "0",
fignum = "1" ) :
"""
@param ipimb_addresses list of IPIMB addresses
@param plot_every_n Zero (don't plot until the end), or N (int, plot every N event)
@param accumulate_n Accumulate all (0) or reset the array every n shots
@param fignum matplotlib figure number
"""
# initialize data
opt = PyanaOptions()
self.sources = opt.getOptStrings(sources)
print "pyana_ipimb, %d sources: " % len(self.sources)
for source in self.sources :
print " ", source
self.plot_every_n = opt.getOptInteger(plot_every_n)
self.accumulate_n = opt.getOptInteger(accumulate_n)
self.mpl_num = opt.getOptInteger(fignum)
# other
self.n_shots = None
self.accu_start = None
# lists to fill numpy arrays
self.initlists()
def initlists(self):
self.fex_sum = {}
self.fex_channels = {}
self.fex_position = {}
self.raw_channels = {}
for source in self.sources :
self.fex_sum[source] = list()
self.fex_channels[source] = list()
self.fex_position[source] = list()
self.raw_channels[source] = list()
def resetlists(self):
self.accu_start = self.n_shots
for source in self.sources :
del self.fex_sum[source][:]
del self.fex_channels[source][:]
del self.fex_position[source][:]
del self.raw_channels[source][:]
def beginjob ( self, evt, env ) :
self.n_shots = 0
self.accu_start = 0
self.data = {}
for source in self.sources :
self.data[source] = IpimbData( source )
def event ( self, evt, env ) :
self.n_shots+=1
if evt.get('skip_event') :
return
# IPM diagnostics, for saturation and low count filtering
for source in self.sources :
# raw data
ipmRaw = evt.get(xtc.TypeId.Type.Id_IpimbData, source )
if ipmRaw :
channelVoltages = []
channelVoltages.append( ipmRaw.channel0Volts() )
channelVoltages.append( ipmRaw.channel1Volts() )
channelVoltages.append( ipmRaw.channel2Volts() )
channelVoltages.append( ipmRaw.channel3Volts() )
self.raw_channels[source].append( channelVoltages )
else :
print "pyana_ipimb: No IpimbData from %s found" % source
# feature-extracted data
ipmFex = evt.get(xtc.TypeId.Type.Id_IpmFex, source )
if ipmFex :
self.fex_sum[source].append( ipmFex.sum )
self.fex_channels[source].append( ipmFex.channel )
self.fex_position[source].append( [ipmFex.xpos, ipmFex.ypos] )
else :
print "pyana_ipimb: No IpmFex from %s found" % source
# ----------------- Plotting ---------------------
if self.plot_every_n != 0 and (self.n_shots%self.plot_every_n)==0 :
header = "DetInfo:IPIMB data shots %d-%d" % (self.accu_start, self.n_shots)
self.make_plots(title=header)
# convert dict to a list:
data_ipimb = []
for source in self.sources :
data_ipimb.append( self.data[source] )
# give the list to the event object
evt.put( data_ipimb, 'data_ipimb' )
# --------- Reset -------------
if self.accumulate_n!=0 and (self.n_shots%self.accumulate_n)==0 :
self.resetlists()
def endjob( self, evt, env ) :
# ----------------- Plotting ---------------------
header = "DetInfo:IPIMB data shots %d-%d" % (self.accu_start, self.n_shots)
self.make_plots(title=header)
# convert dict to a list:
data_ipimb = []
for source in self.sources :
data_ipimb.append( self.data[source] )
# give the list to the event object
evt.put( data_ipimb, 'data_ipimb' )
def make_plots(self, title = ""):
# -------- Begin: move this to beginJob
""" This part should move to begin job, but I can't get
it to update the plot in SlideShow mode when I don't recreate
the figure each time. Therefore plotting is slow...
"""
ncols = 3
nrows = len(self.sources)
height=3.5
if nrows * 3.5 > 12 : height = 12/nrows
width=height*1.3
fig = plt.figure(num=self.mpl_num, figsize=(width*ncols,height*nrows) )
fig.clf()
fig.subplots_adjust(wspace=0.45, hspace=0.45)
fig.suptitle(title)
self.ax = []
for i in range (0, 3*len(self.sources)):
            self.ax.append( fig.add_subplot(nrows, ncols, i+1) )  # subplot positions are 1-based
# -------- End: move this to beginJob
i = 0
for source in self.sources :
xaxis = np.arange( self.accu_start, self.n_shots )
#xaxis = np.arange( 0, len(self.fex_channels[source]) )
#ax1 = fig.add_subplot(nrows, ncols, i)
#plt.axes(self.ax[i])
self.ax[i].clear()
plt.axes(self.ax[i])
array = np.float_(self.fex_sum[source])
#plt.hist(array, 60)
plt.plot(xaxis,array)
plt.title(source)
plt.ylabel('Sum of channels',horizontalalignment='left') # the other right
plt.xlabel('Shot number',horizontalalignment='left') # the other right
i+=1
self.data[source].fex_sum = array
#ax2 = fig.add_subplot(nrows, ncols, i)
self.ax[i].clear()
plt.axes(self.ax[i])
array = np.float_(self.fex_channels[source])
#plt.plot(xaxis, array[:,0],xaxis, array[:,1],xaxis, array[:,2],xaxis, array[:,3])
plt.hist(array[:,0], 60, histtype='stepfilled', color='r', label='Ch0')
plt.hist(array[:,1], 60, histtype='stepfilled', color='b', label='Ch1')
plt.hist(array[:,2], 60, histtype='stepfilled', color='y', label='Ch2')
plt.hist(array[:,3], 60, histtype='stepfilled', color='m', label='Ch3')
plt.title(source)
plt.xlabel('IPIMB Value',horizontalalignment='left') # the other right
leg = self.ax[i].legend()#('ch0','ch1','ch2','ch3'),'upper center')
i+=1
self.data[source].fex_channels = array
self.data[source].raw_channels = np.float_(self.raw_channels[source])
#ax3 = fig.add_subplot(nrows, ncols, i)
self.ax[i].clear()
plt.axes(self.ax[i])
array2 = np.float_(self.fex_position[source])
plt.scatter(array2[:,0],array2[:,1])
plt.title(source)
plt.xlabel('Beam position X',horizontalalignment='left')
plt.ylabel('Beam position Y',horizontalalignment='left')
i+=1
self.data[source].fex_position = array2
plt.draw()
|
[
"ofte@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
ofte@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7
|
09a018a907737d3860489bfb1e283fb3076ae253
|
5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8
|
/buildout-cache/eggs/plone.app.i18n-2.0.3-py2.7.egg/plone/app/i18n/locales/browser/selector.py
|
b1c3042a8d637678e18e64d7d376cfaf252eb48d
|
[] |
no_license
|
renansfs/Plone_SP
|
27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a
|
8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5
|
refs/heads/master
| 2021-01-15T15:32:43.138965
| 2016-08-24T15:30:19
| 2016-08-24T15:30:19
| 65,313,812
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,422
|
py
|
from zope.interface import implements
from zope.viewlet.interfaces import IViewlet
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
class LanguageSelector(BrowserView):
"""Language selector.
>>> ls = LanguageSelector(None, dict(), None, None)
>>> ls
<plone.app.i18n.locales.browser.selector.LanguageSelector object at ...>
>>> ls.update()
>>> ls.available()
False
>>> ls.languages()
[]
>>> ls.showFlags()
False
>>> class Tool(object):
... use_cookie_negotiation = False
... supported_langs = ['de', 'en', 'ar']
... always_show_selector = False
...
... def __init__(self, **kw):
... self.__dict__.update(kw)
...
... def getSupportedLanguages(self):
... return self.supported_langs
...
... def showFlags(self):
... return True
...
... def getAvailableLanguageInformation(self):
... return dict(en={'selected' : True}, de={'selected' : False},
... nl={'selected' : True}, ar={'selected': True})
...
... def getLanguageBindings(self):
... # en = selected by user, nl = default, [] = other options
... return ('en', 'nl', [])
...
... def showSelector(self):
... return bool(self.use_cookie_negotiation or self.always_show_selector)
>>> ls.tool = Tool()
>>> ls.available()
False
>>> ls.tool = Tool(use_cookie_negotiation=True)
>>> ls.available()
True
>>> ls.languages()
[{'code': 'en', 'selected': True}, {'code': 'ar', 'selected': False},
{'code': 'nl', 'selected': False}]
>>> ls.showFlags()
True
>>> ls.tool = Tool(use_cookie_negotiation=True)
>>> ls.available()
True
>>> ls.tool = Tool(always_show_selector=True)
>>> ls.available()
True
>>> from zope.interface import implements
>>> from OFS.interfaces import IItem
>>> class Dummy(object):
... implements(IItem)
... def getPortalObject(self):
... return self
... def absolute_url(self):
... return 'absolute url'
>>> context = Dummy()
>>> context.portal_url = Dummy()
>>> ls = LanguageSelector(context, dict(), None, None)
>>> ls.portal_url()
'absolute url'
"""
implements(IViewlet)
def __init__(self, context, request, view, manager):
super(LanguageSelector, self).__init__(context, request)
self.context = context
self.request = request
self.view = view
self.manager = manager
def update(self):
self.tool = getToolByName(self.context, 'portal_languages', None)
def available(self):
if self.tool is not None:
selector = self.tool.showSelector()
languages = len(self.tool.getSupportedLanguages()) > 1
return selector and languages
return False
def portal_url(self):
portal_tool = getToolByName(self.context, 'portal_url', None)
if portal_tool is not None:
return portal_tool.getPortalObject().absolute_url()
return None
def languages(self):
"""Returns list of languages."""
if self.tool is None:
return []
bound = self.tool.getLanguageBindings()
current = bound[0]
def merge(lang, info):
info["code"] = lang
if lang == current:
info['selected'] = True
else:
info['selected'] = False
return info
languages = [merge(lang, info) for (lang, info) in
self.tool.getAvailableLanguageInformation().items()
if info["selected"]]
# sort supported languages by index in portal_languages tool
supported_langs = self.tool.getSupportedLanguages()
def index(info):
try:
return supported_langs.index(info["code"])
except ValueError:
return len(supported_langs)
return sorted(languages, key=index)
def showFlags(self):
"""Do we use flags?."""
if self.tool is not None:
return self.tool.showFlags()
return False
|
[
"renansfs@gmail.com"
] |
renansfs@gmail.com
|
a979b2457aa15f38669ab8f4705db11263d03091
|
2a171178942a19afe9891c2425dce208ae04348b
|
/kubernetes/test/test_runtime_raw_extension.py
|
cd032512be70fb92fa0bda986b64e64902f7a786
|
[
"Apache-2.0"
] |
permissive
|
ouccema/client-python
|
ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4
|
d7f33ec53e302e66674df581904a3c5b1fcf3945
|
refs/heads/master
| 2021-01-12T03:17:54.274888
| 2017-01-03T22:13:14
| 2017-01-03T22:13:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,460
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.runtime_raw_extension import RuntimeRawExtension
class TestRuntimeRawExtension(unittest.TestCase):
""" RuntimeRawExtension unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testRuntimeRawExtension(self):
"""
Test RuntimeRawExtension
"""
model = kubernetes.client.models.runtime_raw_extension.RuntimeRawExtension()
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
544fc0b34f29d0d1c9f3ba62a97bca006fbb1688
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/srrmik001/question2.py
|
e69bea6d4cc917f38fed872b3bdcfe0cac9366f6
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Mikhaila Sorour
# 3 March 2014
# Program to check the validity of time
hours = eval(input("Enter the hours:\n"))
minutes = eval(input("Enter the minutes:\n"))
seconds = eval(input("Enter the seconds:\n"))
if (0 <= hours <= 24) and (0 <= minutes <= 59) and (0 <= seconds <= 59):
print("Your time is valid.")
else:
print ("Your time is invalid.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
9f9942f6f845c4509deedbae7579929c9e3bc703
|
cff52d298fcbc8d386e73a47cfbebc9be18290ce
|
/kuterless/nice_to_meet_you/control.py
|
27c923c5634f1997189e076391d8d59903636562
|
[] |
no_license
|
justminime/NeuroNet
|
b5c8c230133554ba5e9f971b956c64f6f1a62e79
|
683c3ffe53a9c4f5fd8c2b8c475e44febc859971
|
refs/heads/master
| 2023-08-08T00:34:49.475359
| 2018-12-18T22:03:30
| 2018-12-18T22:03:30
| 163,107,131
| 0
| 0
| null | 2023-09-06T06:59:37
| 2018-12-25T19:34:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
# -*- coding: utf-8 -*-
"""
This file contain the control services, used for all views
"""
#from coplay.models import UserProfile, Discussion, UserUpdate
from django.contrib.auth.models import User
from django.core.mail.message import EmailMessage
from django.core.urlresolvers import reverse
from django.template.base import Template
from django.template.context import Context
from django.template.loader import render_to_string
from django.utils import timezone
from rest_framework.authtoken.models import Token
from taggit.models import Tag
import kuterless.settings
#from gtts import gTTS
import pyqrcode
from kuterless.settings import SITE_URL, MEDIA_URL
def get_sound_to_play_name(acquaintance_id):
# return "media/message" + str(acquaintance_id)+".mp3"
num_of_messages = 8
return "media/audio_messages/default_message"+str(acquaintance_id % num_of_messages)+".mp3"
def get_buisness_card_name(buisness_card_id):
return "media/buisness_card" + str(buisness_card_id)+".vcf"
def get_buisness_qr_code_image_name(buisness_card_id):
return MEDIA_URL + "buisness_card_qr" + str(buisness_card_id)+".png"
def update_vcf_file(buisness_card_id,private_name = '',family_name = '', email = '', phone_number = '', url = ''):
serialized_vcard = ''
serialized_vcard += 'BEGIN:VCARD\r\n'#BEGIN:VCARD
serialized_vcard += 'VERSION:3.0\r\n'#VERSION:3.0
serialized_vcard += 'FN:'+ private_name + ' ' + family_name + '\r\n'#FN:Pname Fname
serialized_vcard += 'N:'+ family_name + ';' + private_name + ';;;\r\n'#N:Fname;Pname;;;
serialized_vcard += 'EMAIL;TYPE=INTERNET;TYPE=HOME:' + email + '\r\n'#EMAIL;TYPE=INTERNET;TYPE=HOME:mail@home.com
serialized_vcard += 'TEL;TYPE=CELL:' + phone_number + '\r\n'#TEL;TYPE=CELL:0522947775
if url:
serialized_vcard += 'URL:'+ url + '\r\n'#URL:hp.com
serialized_vcard += 'END:VCARD\r\n'#END:VCARD
with open(get_buisness_card_name(buisness_card_id), "wb") as f:
f.write( serialized_vcard)
buisness_card_url = reverse('nice_to_meet_you:scan_card', kwargs={'pk': str(buisness_card_id)})
url = pyqrcode.create(SITE_URL + buisness_card_url)
with open(get_buisness_qr_code_image_name(buisness_card_id)[1:], 'wb') as fstream:
url.png(fstream, scale=5)
def update_sound_to_play_file(acquaintance_id, message):
return None
# tts_object = gTTS( text = message, lang='en', slow=False)
# tts_object.save(get_sound_to_play_name(acquaintance_id))
|
[
"tzahimanmobile@gmail.com"
] |
tzahimanmobile@gmail.com
|
9eb2742835cce3707419ca4eba4a07b4310eb473
|
3780a5612c7a3c084f9106840e19e6fc07f135df
|
/prefect/migrations/0001_initial.py
|
752252a6f24b65190d8976e2f377c0f62ab37ea6
|
[
"MIT"
] |
permissive
|
dschien/energy-aggregator
|
7a6f487db7a5fdbafc92152371ffedfc39f71f2e
|
421638029c5066c3182e8c94424abf5a6b05c1ea
|
refs/heads/master
| 2021-01-01T05:11:16.303483
| 2016-10-25T17:52:01
| 2016-10-25T17:52:01
| 71,920,880
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-21 16:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('ep', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PrefectDeviceParameterConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('power_consumption', models.DecimalField(decimal_places=2, max_digits=7)),
('line', models.CharField(max_length=2)),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='configuration', to='ep.Device')),
],
options={
'verbose_name': 'Prefect Device Configuration',
},
),
]
|
[
"dschien@gmail.com"
] |
dschien@gmail.com
|
2e388c4a7d35230a2f16bf9b5800ce21de947eb3
|
42bf795f97efe36291590879dd1d2e146df1a4f0
|
/User_details/decorators.py
|
6a3e46c903d5138679b94eed874fc62faa36b601
|
[] |
no_license
|
sameesayeed007/tango-backend
|
b0e05fcdfe4b6c5f5a2cb501f6ef1e94da145811
|
05923a80ef4e464a5baa27102d678b1d8858f787
|
refs/heads/master
| 2022-12-30T18:57:52.180073
| 2020-10-25T09:08:35
| 2020-10-25T09:08:35
| 291,616,184
| 0
| 2
| null | 2020-10-05T05:48:05
| 2020-08-31T04:41:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,099
|
py
|
from django.http import HttpResponse
from django.shortcuts import redirect
def unauthenticated_user(view_func):
def wrapper_func(request, *args, **kwargs):
if request.user.is_authenticated:
return redirect('/')
else:
return view_func(request, *args, **kwargs)
return wrapper_func
def allowed_users(allowed_roles=[]):
def decorator(view_func):
def wrapper_func(request, *args, **kwargs):
group = None
if request.user.groups.exists():
group = request.user.groups.all()[0].name
if group in allowed_roles:
return view_func(request, *args, **kwargs)
else:
return HttpResponse('You are not authorized to view this page')
return wrapper_func
return decorator
def admin_only(view_func):
def wrapper_function(request, *args, **kwargs):
group = None
if request.user.groups.exists():
group = request.user.groups.all()[0].name
if group == 'user':
return redirect('user-page')
elif group == 'admin':
return view_func(request, *args, **kwargs)
elif group == 'supplier':
return view_func(request, *args, **kwargs)
return wrapper_function
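# Illustrative usage (added for clarity, not part of the original module). The view names
# and the 'admin' group below are assumptions; in a project these decorators would wrap
# Django view functions like this:
@unauthenticated_user
def login_page(request):
    return HttpResponse('login form goes here')
@allowed_users(allowed_roles=['admin'])
def admin_dashboard(request):
    return HttpResponse('admin dashboard')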
|
[
"sameesayeed880@gmail.com"
] |
sameesayeed880@gmail.com
|
814f5968de17f5952b71e28a3b6183bddd55a086
|
2e582bc42f104e93be85cf6abffd258a36a5ec15
|
/1487A/arena.py
|
6d266b328f22a0410a98405aa90b801f80cc2cba
|
[] |
no_license
|
akantuni/Codeforces
|
d2f86f1dd156aef300e797117a9ef21927c8fef0
|
6901a982cbaf705fdb22e5f78999a5b82917bb23
|
refs/heads/master
| 2023-06-14T18:02:37.316810
| 2021-07-10T19:13:13
| 2021-07-10T19:13:13
| 289,826,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
t = int(input())
for k in range(t):
n = int(input())
heroes = sorted(list(map(int, input().split())))
if len(set(heroes)) == 1:
print(0)
else:
for i, hero in enumerate(heroes):
if hero > heroes[0]:
print(n - i)
break
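# Worked example of what the loop above computes (added for clarity): for input heroes
# [1, 3, 3, 7] the sorted list is [1, 3, 3, 7]; the first index whose value exceeds the
# minimum heroes[0] = 1 is i = 1, so n - i = 3 is printed, i.e. the count of heroes that
# are strictly stronger than the weakest one.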
|
[
"akantuni@gmail.com"
] |
akantuni@gmail.com
|
56bb4c3df12e4fd71898de232f1404e82a3cb36e
|
54b31b705d88e21bc0b23aabe1df15ca13a07de2
|
/bayespy/discrete_example.py
|
680d647f49cdac991073a70d4427c783c026955a
|
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"AFL-3.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bayespy/bayespy
|
307ef4c51d511e14d4693cce9929dda37124d11d
|
5fe58f7160ebc3a9df7f9e96e50d2bd47837794a
|
refs/heads/develop
| 2023-08-18T21:35:27.744022
| 2023-05-25T08:16:36
| 2023-05-25T08:16:36
| 5,568,322
| 655
| 164
|
MIT
| 2023-08-15T09:31:55
| 2012-08-27T08:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,910
|
py
|
# This example could be simplified a little bit by using Bernoulli instead of
# Categorical, but Categorical makes it possible to use more categories than
# just TRUE and FALSE.
import numpy as np
from bayespy.nodes import Categorical, Mixture
from bayespy.inference import VB
# NOTE: Python's built-in booleans don't work nicely for indexing, thus define
# own variables:
FALSE = 0
TRUE = 1
def _or(p_false, p_true):
"""
Build probability table for OR-operation of two parents
p_false: Probability table to use if both are FALSE
p_true: Probability table to use if one or both is TRUE
"""
return np.take([p_false, p_true], [[FALSE, TRUE], [TRUE, TRUE]], axis=0)
asia = Categorical([0.5, 0.5])
tuberculosis = Mixture(asia, Categorical, [[0.99, 0.01], [0.8, 0.2]])
smoking = Categorical([0.5, 0.5])
lung = Mixture(smoking, Categorical, [[0.98, 0.02], [0.25, 0.75]])
bronchitis = Mixture(smoking, Categorical, [[0.97, 0.03], [0.08, 0.92]])
xray = Mixture(tuberculosis, Mixture, lung, Categorical,
_or([0.96, 0.04], [0.115, 0.885]))
dyspnea = Mixture(bronchitis, Mixture, tuberculosis, Mixture, lung, Categorical,
[_or([0.6, 0.4], [0.18, 0.82]),
_or([0.11, 0.89], [0.04, 0.96])])
# Mark observations
tuberculosis.observe(TRUE)
smoking.observe(FALSE)
bronchitis.observe(TRUE) # not a "chance" observation as in the original example
# Run inference
Q = VB(dyspnea, xray, bronchitis, lung, smoking, tuberculosis, asia)
Q.update(repeat=100)
# Show results
print("P(asia):", asia.get_moments()[0][TRUE])
print("P(tuberculosis):", tuberculosis.get_moments()[0][TRUE])
print("P(smoking):", smoking.get_moments()[0][TRUE])
print("P(lung):", lung.get_moments()[0][TRUE])
print("P(bronchitis):", bronchitis.get_moments()[0][TRUE])
print("P(xray):", xray.get_moments()[0][TRUE])
print("P(dyspnea):", dyspnea.get_moments()[0][TRUE])
|
[
"jaakko.luttinen@iki.fi"
] |
jaakko.luttinen@iki.fi
|
ccd652427fbc4bbf97ee89ec2711fe34015941be
|
de8310ef0914f45bb3a5bba3b089edbf45148867
|
/ArraysAndStrings/URLify.py
|
5d34eb4fa524336e0ebef88751ba27fbcebd1eac
|
[] |
no_license
|
agbarker/Cracking-the-Coding-Interview-Python
|
f51a053ffe45bd89958f8b1e6eeb4db4c1ed6129
|
6358275b5f921d3c0203795d6951a6d3292333cf
|
refs/heads/master
| 2020-03-20T11:46:10.750589
| 2018-06-15T00:10:15
| 2018-06-15T00:10:15
| 137,411,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
"""Write a method to replace all spaces in a string with '%20'. You may assume that the string has sufficient space at the end to hold the additional characters, and that you are given the "true" length of the string."""
"""Replacement in place is not possible in python as strings are immutable. Much of the input is unecessary in python due to this constraint. I have eliminated the length variable input."""
def urlify(string):
"""Converts string with spaces to valid url.
>>> urlify('taco cat')
'taco%20cat'
>>> urlify('cat')
'cat'"""
    tokens = string.split(" ")
    result = tokens[0]
    # walk the remaining tokens and join them with the encoded space
    for token in tokens[1:]:
        result = result + '%20' + token
    return result
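# A shorter alternative (added for illustration, not part of the original solution):
# since only spaces need replacing, str.replace gives the same result in one step.
def urlify_replace(string):
    """Equivalent one-liner using str.replace."""
    return string.replace(" ", "%20")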
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
5cc67c74ecd44738a48c2a8a28aec26cb911ffc9
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_laming.py
|
7c430ecc3c5017417aea135fc105890846dec859
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# class header
class _LAMING():
    def __init__(self,):
        self.name = "LAMING"
        # the original assigned an undefined name `lam`; use the base word form instead
        self.definitions = ['lam']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['lam']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ef38d0e49ad09bd73b645413d4327879867a7aba
|
33b2f392afef9b5e83e177fd3f0ef5582051b97d
|
/source_code/version_v1.0.2/wx/lib/pubsub/core/arg1/publisher.py
|
6dc684450d9987d3289544ec5b29c09da6a994a5
|
[] |
no_license
|
luke1987515/Compatibility_Get_Reprt
|
eb0edbc33afcd59fb1efcb53d7eee1658d2c3009
|
2707b9dec5ee8c0a0cc90b6a3d4682970b21a1cb
|
refs/heads/master
| 2021-01-10T07:39:06.111371
| 2016-01-26T06:34:51
| 2016-01-26T06:34:51
| 48,530,211
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
"""
Mixin for publishing messages to a topic's listeners. This will be
mixed into topicobj.Topic so that a user can use a Topic object to
send a message to the topic's listeners via a publish() method.
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
from .publisherbase import PublisherBase
class Publisher(PublisherBase):
"""
Publisher that allows old-style Message.data messages to be sent
to listeners. Listeners take one arg (required, unless there is an
*arg), but can have kwargs (since they have default values).
"""
def sendMessage(self, topicName, data=None):
"""Send message of type topicName to all subscribed listeners,
with message data. If topicName is a subtopic, listeners
of topics more general will also get the message.
Note that any listener that lets a raised exception escape will
interrupt the send operation, unless an exception handler was
specified via pub.setListenerExcHandler().
"""
topicMgr = self.getTopicMgr()
topicObj = topicMgr.getOrCreateTopic(topicName)
# don't care if topic not final: topicObj.getListeners()
# will return nothing if not final but notification will still work
topicObj.publish(data)
def getMsgProtocol(self):
return 'arg1'
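# Illustrative call (added for clarity), assuming `publisher` is the Publisher instance
# wired up by the pubsub core: per the docstring above, listeners of 'engine.status' and
# of the more general 'engine' topic would both receive the data payload.
#
#     publisher.sendMessage('engine.status', data={'rpm': 2400})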
|
[
"luke1987515@hotmail.com"
] |
luke1987515@hotmail.com
|
8a40cb445ac8644a94ee38c4497b48ecbcd1922d
|
47927ae79f1af279e186e76c85a77c962802262d
|
/number-of-provinces/number-of-provinces.py
|
1097ed6272ba003b0e3a436057a685835f5cbba7
|
[] |
no_license
|
asheeshcric/leetcode
|
810b53c05c44aecea70aaa333041edf9540dd532
|
8e651348b8aaff2c7bf2a09327b73ff23b6f613f
|
refs/heads/main
| 2023-08-15T12:57:43.347469
| 2021-10-12T23:53:14
| 2021-10-12T23:53:14
| 346,944,371
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,260
|
py
|
class Solution:
def findCircleNum(self, isConnected: List[List[int]]) -> int:
graph = self.buildGraph(isConnected)
print(graph)
def dfs(node):
stack = [node]
while stack:
node = stack.pop()
if node in visited:
continue
for neigh in graph[node]:
stack.append(neigh)
visited.add(node)
# Now find the number of connected components
components = 0
visited = set()
for node in graph:
if node not in visited:
dfs(node)
components += 1
return components
def buildGraph(self, isConnected):
graph = dict()
for node, connections in enumerate(isConnected):
graph[node] = set()
for other_node, connection in enumerate(connections):
if other_node == node:
continue
if connection == 1:
# This means the node is connected to the original node
graph[node].add(other_node)
return graph
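# Minimal sanity check (added for illustration, not part of the original submission).
# It assumes `from typing import List` is in scope, as it is on LeetCode. The matrix
# below says city 0 and city 1 are directly connected while city 2 is isolated, so
# there are two provinces (connected components).
#
#     sol = Solution()
#     print(sol.findCircleNum([[1, 1, 0], [1, 1, 0], [0, 0, 1]]))  # expected output: 2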
|
[
"ashiz2013@gmail.com"
] |
ashiz2013@gmail.com
|
83ed6309f274d9843de88fa5fc4f0f94a7a6375c
|
050fc5ca698dfd7612dee42aa980fc7b5eee40a2
|
/tests/plugin/web/sw_fastapi/test_fastapi.py
|
db79f769eae13f255d37f5ef2543a8f5eaccad9a
|
[
"Apache-2.0"
] |
permissive
|
apache/skywalking-python
|
8ac6ce06630c519f9984a45e74c1fcc88cf5b9d6
|
1a360228c63cd246dd4c5dd8e1f09bdd5556ad7d
|
refs/heads/master
| 2023-09-05T02:45:56.225937
| 2023-08-28T22:19:24
| 2023-08-28T22:19:24
| 261,456,329
| 178
| 122
|
Apache-2.0
| 2023-08-28T22:19:26
| 2020-05-05T12:13:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Callable
import pytest
import requests
from skywalking.plugins.sw_fastapi import support_matrix
from tests.orchestrator import get_test_vector
from tests.plugin.base import TestPluginBase
@pytest.fixture
def prepare():
# type: () -> Callable
return lambda *_: requests.get('http://0.0.0.0:9090/users?test=test1&test=test2&test2=test2', timeout=5)
class TestPlugin(TestPluginBase):
@pytest.mark.parametrize('version', get_test_vector(lib_name='fastapi', support_matrix=support_matrix))
def test_plugin(self, docker_compose, version):
self.validate()
|
[
"noreply@github.com"
] |
apache.noreply@github.com
|
1c757ce5d3ece2fec1e9f67392d79f90b9cbe48d
|
377420d718094a37da2e170718cecd80435d425a
|
/google/ads/googleads/v4/resources/types/keyword_plan_ad_group_keyword.py
|
43255c2418b375282bce90bbdcbac956b32c4c27
|
[
"Apache-2.0"
] |
permissive
|
sammillendo/google-ads-python
|
ed34e737748e91a0fc5716d21f8dec0a4ae088c1
|
a39748521847e85138fca593f3be2681352ad024
|
refs/heads/master
| 2023-04-13T18:44:09.839378
| 2021-04-22T14:33:09
| 2021-04-22T14:33:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,104
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v4.enums.types import keyword_match_type
from google.protobuf import wrappers_pb2 as wrappers # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.resources",
marshal="google.ads.googleads.v4",
manifest={"KeywordPlanAdGroupKeyword",},
)
class KeywordPlanAdGroupKeyword(proto.Message):
r"""A Keyword Plan ad group keyword.
Max number of keyword plan keywords per plan: 10000.
Attributes:
resource_name (str):
Immutable. The resource name of the Keyword Plan ad group
keyword. KeywordPlanAdGroupKeyword resource names have the
form:
``customers/{customer_id}/keywordPlanAdGroupKeywords/{kp_ad_group_keyword_id}``
keyword_plan_ad_group (google.protobuf.wrappers_pb2.StringValue):
The Keyword Plan ad group to which this
keyword belongs.
id (google.protobuf.wrappers_pb2.Int64Value):
Output only. The ID of the Keyword Plan
keyword.
text (google.protobuf.wrappers_pb2.StringValue):
The keyword text.
match_type (google.ads.googleads.v4.enums.types.KeywordMatchTypeEnum.KeywordMatchType):
The keyword match type.
cpc_bid_micros (google.protobuf.wrappers_pb2.Int64Value):
A keyword level max cpc bid in micros (e.g.
$1 = 1mm). The currency is the same as the
account currency code. This will override any
CPC bid set at the keyword plan ad group level.
Not applicable for negative keywords. (negative
= true) This field is Optional.
negative (google.protobuf.wrappers_pb2.BoolValue):
Immutable. If true, the keyword is negative.
"""
resource_name = proto.Field(proto.STRING, number=1)
keyword_plan_ad_group = proto.Field(
proto.MESSAGE, number=2, message=wrappers.StringValue,
)
id = proto.Field(proto.MESSAGE, number=3, message=wrappers.Int64Value,)
text = proto.Field(proto.MESSAGE, number=4, message=wrappers.StringValue,)
match_type = proto.Field(
proto.ENUM,
number=5,
enum=keyword_match_type.KeywordMatchTypeEnum.KeywordMatchType,
)
cpc_bid_micros = proto.Field(
proto.MESSAGE, number=6, message=wrappers.Int64Value,
)
negative = proto.Field(proto.MESSAGE, number=7, message=wrappers.BoolValue,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
sammillendo.noreply@github.com
|
5f81c65e80c83744abc3c5e164e1f0a9d2296682
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/util/task/TaskMonitor.pyi
|
6d492035c2c3080b8d5e526b272a37ac4ea8c71f
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,972
|
pyi
|
import ghidra.util.task
import java.lang
class TaskMonitor(object):
"""
TaskMonitor provides an interface by means of which a
potentially long running task can show its progress and also check if the user
has cancelled the operation.
Operations that support a task monitor should periodically
check to see if the operation has been cancelled and abort. If possible, the
operation should also provide periodic progress information. If it can estimate a
percentage done, then it should use the setProgress(int) method,
otherwise it should just call the setMessage(String) method.
"""
    DUMMY: ghidra.util.task.TaskMonitor = ...  # the stub generator leaked an object repr here; ellipsis keeps the stub valid
NO_PROGRESS_VALUE: int = -1
def addCancelledListener(self, listener: ghidra.util.task.CancelledListener) -> None:
"""
Add cancelled listener
@param listener the cancel listener
"""
...
def cancel(self) -> None:
"""
Cancel the task
"""
...
def checkCanceled(self) -> None:
"""
Check to see if this monitor has been canceled
@throws CancelledException if monitor has been cancelled
"""
...
def clearCanceled(self) -> None:
"""
Clear the cancellation so that this TaskMonitor may be reused
"""
...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getMaximum(self) -> long:
"""
Returns the current maximum value for progress
@return the maximum progress value
"""
...
def getMessage(self) -> unicode:
"""
Gets the last set message of this monitor
@return the message
"""
...
def getProgress(self) -> long:
"""
Returns the current progress value or {@link #NO_PROGRESS_VALUE} if there is no value
set
@return the current progress value or {@link #NO_PROGRESS_VALUE} if there is no value
set
"""
...
def hashCode(self) -> int: ...
def incrementProgress(self, incrementAmount: long) -> None:
"""
A convenience method to increment the current progress by the given value
@param incrementAmount The amount by which to increment the progress
"""
...
def initialize(self, max: long) -> None:
"""
        Initializes this TaskMonitor to the given max value. The current value of this monitor
will be set to zero.
@param max maximum value for progress
"""
...
def isCancelEnabled(self) -> bool:
"""
Returns true if cancel ability is enabled
@return true if cancel ability is enabled
"""
...
def isCancelled(self) -> bool:
"""
Returns true if the user has cancelled the operation
@return true if the user has cancelled the operation
"""
...
def isIndeterminate(self) -> bool:
"""
Returns true if this monitor shows no progress
@return true if this monitor shows no progress
"""
...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def removeCancelledListener(self, listener: ghidra.util.task.CancelledListener) -> None:
"""
Remove cancelled listener
@param listener the cancel listener
"""
...
def setCancelEnabled(self, enable: bool) -> None:
"""
Set the enablement of the Cancel button
@param enable true means to enable the cancel button
"""
...
def setIndeterminate(self, indeterminate: bool) -> None:
"""
An indeterminate task monitor may choose to show an animation instead of updating progress
@param indeterminate true if indeterminate
"""
...
def setMaximum(self, max: long) -> None:
"""
Set the progress maximum value
<p><b>
Note: setting this value will reset the progress to be the max if the progress is currently
        greater than the new max value.</b>
@param max maximum value for progress
"""
...
def setMessage(self, message: unicode) -> None:
"""
Sets the message displayed on the task monitor
@param message the message to display
"""
...
def setProgress(self, value: long) -> None:
"""
Sets the current progress value
@param value progress value
"""
...
def setShowProgressValue(self, showProgressValue: bool) -> None:
"""
True (the default) signals to paint the progress information inside of the progress bar
@param showProgressValue true to paint the progress value; false to not
"""
...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def cancelEnabled(self) -> bool: ...
@cancelEnabled.setter
def cancelEnabled(self, value: bool) -> None: ...
@property
def cancelled(self) -> bool: ...
@property
def indeterminate(self) -> bool: ...
@indeterminate.setter
def indeterminate(self, value: bool) -> None: ...
@property
def maximum(self) -> long: ...
@maximum.setter
def maximum(self, value: long) -> None: ...
@property
def message(self) -> unicode: ...
@message.setter
def message(self, value: unicode) -> None: ...
@property
def progress(self) -> long: ...
@progress.setter
def progress(self, value: long) -> None: ...
@property
def showProgressValue(self) -> None: ... # No getter available.
@showProgressValue.setter
def showProgressValue(self, value: bool) -> None: ...
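# Illustrative usage sketch (added for clarity, not part of the generated stub): a
# long-running Ghidra script would typically drive a TaskMonitor like this, assuming
# `monitor` is supplied by the script environment and `items`/`process` are hypothetical.
#
#     monitor.initialize(len(items))
#     for i, item in enumerate(items):
#         monitor.checkCanceled()          # raises CancelledException if the user cancelled
#         monitor.setMessage("Processing item %d" % i)
#         process(item)                    # hypothetical per-item work
#         monitor.incrementProgress(1)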
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
4c776c689923c340b8f7275b6b5350139f3b45f6
|
551e1190a7b1da5694ecb812eecf0ed44a4025ee
|
/arrfill.py
|
82c3f569559ae964387875be01c812b0b23528ff
|
[] |
no_license
|
Destroyer4114/Python
|
5dc3d85a31c7d1867e71f050bdc84209d08a945c
|
82fbdc75f367cecb16166b2e03a0e6fc38da2c62
|
refs/heads/main
| 2023-08-11T19:01:09.588798
| 2021-10-09T11:13:12
| 2021-10-09T11:13:12
| 351,766,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
for i in range(int(input())):
    # int() was being called with no argument; pass the int type itself to map
    n, m = map(int, input().split())
    s = [0] * n
    for j in range(m):
        x, y = map(int, input().split())
|
[
"mehul.20204114@mnnit.ac.in"
] |
mehul.20204114@mnnit.ac.in
|
dc109bca51260581e325a5a3ee31aad9bb8d3296
|
70fa6468c768d4ec9b4b14fc94fa785da557f1b5
|
/lib/surface/ml_engine/operations/cancel.py
|
889a587d266cce0d2d87ee0d2731e7bceb68bdb2
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
kylewuolle/google-cloud-sdk
|
d43286ef646aec053ecd7eb58566ab2075e04e76
|
75f09ebe779e99fdc3fd13b48621fe12bfaa11aa
|
refs/heads/master
| 2020-04-20T22:10:41.774132
| 2019-01-26T09:29:26
| 2019-01-26T09:29:26
| 169,131,028
| 0
| 0
|
NOASSERTION
| 2019-02-04T19:04:40
| 2019-02-04T18:58:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml-engine operations cancel command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import operations
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import operations_util
def _AddCancelArgs(parser):
flags.OPERATION_NAME.AddToParser(parser)
class Cancel(base.SilentCommand):
"""Cancel a Cloud ML Engine operation."""
@staticmethod
def Args(parser):
_AddCancelArgs(parser)
def Run(self, args):
return operations_util.Cancel(operations.OperationsClient(),
args.operation)
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
0a62e91e6809dd26fae2e255a028678e38365747
|
d08b0a2ea1365e96c2143a3076d6f1cfce178321
|
/learnPython-master/Python基础代码/生成器-杨辉三角.py
|
9ad701e82460579ee6166ff43b0e04f20832dd4f
|
[] |
no_license
|
xueyes/py3_study
|
f64060e5dbfcbf11c8d61de8561ce90bbb4e3c19
|
a7d83b58ef95806f061f375952db604afe98bc13
|
refs/heads/master
| 2022-12-11T05:56:03.540612
| 2019-05-06T13:07:55
| 2019-05-06T13:07:55
| 162,883,421
| 1
| 0
| null | 2022-12-08T02:28:21
| 2018-12-23T11:02:31
|
HTML
|
UTF-8
|
Python
| false
| false
| 372
|
py
|
def triangles():
g = [1]
while True:
yield g
g.append(0)
g = [g[i] + g[i - 1] for i in range(len(g))]
# Method 2
# def Triangles():
# L = [1]
# while True:
# yield L
# L = [1] + [L[i-1]+L[i] for i in range(len(L)) if i>0] + [1]
n = 0
for t in triangles():
print(t)
n = n + 1
if n == 10:
break
|
[
"1401354763@qq.com"
] |
1401354763@qq.com
|
bf1a535305ed6702198c8e6f86a7e35fc5ee1d24
|
6a6984544a4782e131510a81ed32cc0c545ab89c
|
/src/trigger-sim/resources/test/InIceSMTTest.py
|
e76d9b6cc051e6da93843ab9522e021036a89dad
|
[] |
no_license
|
wardVD/IceSimV05
|
f342c035c900c0555fb301a501059c37057b5269
|
6ade23a2fd990694df4e81bed91f8d1fa1287d1f
|
refs/heads/master
| 2020-11-27T21:41:05.707538
| 2016-09-02T09:45:50
| 2016-09-02T09:45:50
| 67,210,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
#!/usr/bin/env python
from I3Tray import *
from icecube import icetray, dataclasses, dataio, trigger_sim
from os.path import expandvars
import sys
from icecube.trigger_sim.inice_test_modules import TestSource, TestModule
tray = I3Tray()
gcd_file = expandvars("$I3_TESTDATA/sim/GeoCalibDetectorStatus_2013.56429_V1.i3.gz")
tray.AddModule("I3InfiniteSource", prefix=gcd_file, stream=icetray.I3Frame.DAQ)
tray.AddModule(TestSource)
tray.AddModule("SimpleMajorityTrigger", TriggerConfigID = 1006)
tray.AddModule(TestModule, TriggerConfigID = 1006)
tray.Execute(100)
tray.Finish()
|
[
"wardvandriessche@gmail.com"
] |
wardvandriessche@gmail.com
|
c5b6007281a9882a1ead24bdfd1c6c3bfc974164
|
a110cda0dd755a0aeeccaa349de5b7c8f836f7d9
|
/Dynamo_0.9.X/Material.ImportAllFromProject.py
|
8ebeabec311a77bd8f881b36999e9aede5824adc
|
[] |
no_license
|
ksobon/archi-lab
|
26d93ef07e4f571e73a78bc40299edd3dc84c2a6
|
9a8a57eccca899ace78a998dc7698ff7754fae6b
|
refs/heads/master
| 2021-01-15T09:37:06.045588
| 2020-06-03T15:55:46
| 2020-06-03T15:55:46
| 26,090,112
| 6
| 5
| null | 2020-02-09T04:24:41
| 2014-11-02T19:02:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
# Copyright(c) 2015, Konrad K Sobon
# @arch_laboratory, http://archi-lab.net
# Import Element wrapper extension methods
import clr
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
uiapp = DocumentManager.Instance.CurrentUIApplication
app = uiapp.Application
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
import sys
pyt_path = r'C:\Program Files (x86)\IronPython 2.7\Lib'
sys.path.append(pyt_path)
#The inputs to this node will be stored as a list in the IN variable.
dataEnteringNode = IN
RunIt = IN[1]
class CustomCopyHandler(IDuplicateTypeNamesHandler):
def OnDuplicateTypeNamesFound(self, args):
return DuplicateTypeAction.UseDestinationTypes
try:
if RunIt:
TransactionManager.Instance.EnsureInTransaction(doc)
errorReport = None
fileDoc = app.OpenDocumentFile(IN[0])
filter = ElementClassFilter(Material)
allMat = FilteredElementCollector(fileDoc).WherePasses(filter).ToElementIds()
trans = Autodesk.Revit.DB.Transform.Identity
co = CopyPasteOptions()
co.SetDuplicateTypeNamesHandler(CustomCopyHandler())
newIds = ElementTransformUtils.CopyElements(fileDoc, allMat, doc, trans, co)
output = []
if newIds != None:
for i in newIds:
output.append(doc.GetElement(i).ToDSType(False))
TransactionManager.Instance.TransactionTaskDone()
else:
errorReport = "Set Run it to true!"
except:
	# if an error occurs anywhere in the process, catch it
import traceback
errorReport = traceback.format_exc()
#Assign your output to the OUT variable
if errorReport == None:
OUT = output
else:
OUT = errorReport
|
[
"ksobon1986@gmail.com"
] |
ksobon1986@gmail.com
|
63e88f32f327f0a4fdb1cdcaada92096e1338efc
|
b7fe089a067bdefd917af1c6b8f0701d33a3cf77
|
/tests/networks/backbones/test_efficientnet.py
|
230ffbbe1d834ecd3fc0f385fc495db1d46c43e4
|
[
"MIT"
] |
permissive
|
Ares2013/coreml
|
145a186a8e12f95d836118d30ba9d4e533862c03
|
cd9f8c5f2e8cec8da6c5842dd235339b2e32be38
|
refs/heads/master
| 2022-12-04T18:02:01.590739
| 2020-08-19T08:41:12
| 2020-08-19T08:41:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,722
|
py
|
"""Tests coreml.networks.backbones.efficientnet"""
import torch
import torch.nn as nn
import unittest
from coreml.networks.backbones.efficientnet import EfficientNet
class EfficientNetTestCase(unittest.TestCase):
"""Class to check the EfficientNet backbone"""
def test_efficientnet_b0(self):
"""Test efficientnet_b0"""
net = EfficientNet('tf_efficientnet_b0', num_classes=2, in_channels=1)
dummy = torch.ones((128, 1, 96, 64))
out = net(dummy)
        # assertTrue treats the second argument as a message, so it never compared the shapes
        self.assertEqual(out.shape, (128, 2))
def test_efficientnet_b4(self):
"""Test efficientnet_b4"""
net = EfficientNet('tf_efficientnet_b4', num_classes=2, in_channels=1)
dummy = torch.ones((128, 1, 96, 64))
out = net(dummy)
        self.assertEqual(out.shape, (128, 2))
def test_efficientnet_b5(self):
"""Test efficientnet_b5"""
net = EfficientNet('tf_efficientnet_b5', num_classes=2, in_channels=1)
dummy = torch.ones((128, 1, 96, 64))
out = net(dummy)
        self.assertEqual(out.shape, (128, 2))
def test_efficientnet_b7(self):
"""Test efficientnet_b7"""
net = EfficientNet('tf_efficientnet_b7', num_classes=2, in_channels=1)
dummy = torch.ones((128, 1, 96, 64))
out = net(dummy)
        self.assertEqual(out.shape, (128, 2))
def test_efficientnet_features(self):
"""Test efficientnet extract_features"""
net = EfficientNet(
'tf_efficientnet_b0', num_classes=2, in_channels=1,
return_features=True)
dummy = torch.ones((128, 1, 224, 224))
out = net(dummy)
        self.assertEqual(out.shape, (128, 1280, 7, 7))
if __name__ == "__main__":
unittest.main()
|
[
"amandalmia18@gmail.com"
] |
amandalmia18@gmail.com
|
8d6463e395c349d2b2e673b0ba78279a143e92dd
|
f77d7a92e64766c1aaa888e91fb0377f1acd37a4
|
/docs/photons_docs/config/ext/photons_errors.py
|
94f270010e4c9e3de934fa4dbcff7e81d8607203
|
[
"MIT"
] |
permissive
|
geoff-nixon/photons-core
|
e531b3dd6f20e51138d0a38a680f140dc01f409b
|
cd4aaca33a79485fe5bb8fc26bdf35b7a7064e4b
|
refs/heads/master
| 2020-05-04T09:04:49.753358
| 2019-03-31T05:10:36
| 2019-03-31T05:10:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
from photons_app.errors import PhotonsAppError
from photons_app import errors
from docutils.parsers.rst import Directive
from docutils import statemachine
import pkg_resources
import os
class ShowPhotonsErrorsModulesDirective(Directive):
has_content = True
def run(self):
template = []
for name in dir(errors):
thing = getattr(errors, name)
if thing is PhotonsAppError:
continue
if isinstance(thing, type) and issubclass(thing, PhotonsAppError):
template.extend([
".. autoclass:: photons_app.errors.{0}".format(name)
, ""
, " {0}".format(thing.desc)
, ""]
)
source = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - 1)
tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
lines = statemachine.string2lines('\n'.join(template), tab_width, convert_whitespace=True)
self.state_machine.insert_input(lines, source)
return []
def setup(app):
app.add_directive('photons_errors', ShowPhotonsErrorsModulesDirective)
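# Illustrative use from a Sphinx .rst page (added for clarity; 'photons_errors' is the
# directive name registered by setup() above):
#
#     .. photons_errors::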
|
[
"stephen@delfick.com"
] |
stephen@delfick.com
|
a6b5ab6e0082d3f1e3ba47450999a097a86f5258
|
cee8fb161f0bd4aa4345b1ec9c269e43cc10c2dd
|
/rtamt/operation/arithmetic/dense_time/online/exp_operation.py
|
54fad220102af8c5dda57df0aba9870f88bcf60c
|
[
"BSD-3-Clause"
] |
permissive
|
TomyYamy/rtamt
|
172510fc2b26188b1d7ed6f6cfefc6830508c9a1
|
a16db77b61028f774d81457ff22e666229a5432c
|
refs/heads/master
| 2023-09-05T00:06:44.375841
| 2021-11-12T09:46:58
| 2021-11-12T09:46:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
import math
from rtamt.operation.abstract_operation import AbstractOperation
class ExpOperation(AbstractOperation):
def __init__(self):
self.input = []
def update(self, input_list):
out = []
for in_sample in input_list:
out_time = in_sample[0]
out_value = math.exp(in_sample[1])
out.append([out_time, out_value])
return out
def update_final(self, *args, **kargs):
return self.update(args[0])
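# Minimal usage sketch (added for illustration): each input sample is a [time, value]
# pair, and update() applies exp() to the value while keeping the timestamp.
if __name__ == '__main__':
    op = ExpOperation()
    print(op.update([[0.0, 0.0], [1.0, 1.0]]))  # -> [[0.0, 1.0], [1.0, 2.718281828459045]]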
|
[
"dejan.nickovic@ait.ac.at"
] |
dejan.nickovic@ait.ac.at
|
0a2080933765ad216e8ef34c423e5dfdc1bbd3ec
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/0dade18f43cf5c015773d461ccff478f156179b5-<get_module_docs>-fix.py
|
6399b208333447abdd182b8d1999ac8a8e8cbaf6
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,481
|
py
|
def get_module_docs(self, path, contents):
'\n :type path: str\n :type contents: str\n :rtype: dict[str, any]\n '
module_doc_types = ['DOCUMENTATION', 'EXAMPLES', 'RETURN']
docs = {
}
def check_assignment(statement, doc_types=None):
'Check the given statement for a documentation assignment.'
for target in statement.targets:
if isinstance(target, ast.Tuple):
continue
if (doc_types and (target.id not in doc_types)):
continue
docs[target.id] = dict(yaml=statement.value.s, lineno=statement.lineno, end_lineno=(statement.lineno + len(statement.value.s.splitlines())))
module_ast = self.parse_module(path, contents)
if (not module_ast):
return {
}
if path.startswith('lib/ansible/modules/'):
for body_statement in module_ast.body:
if isinstance(body_statement, ast.Assign):
check_assignment(body_statement, module_doc_types)
elif path.startswith('lib/ansible/utils/module_docs_fragments/'):
for body_statement in module_ast.body:
if isinstance(body_statement, ast.ClassDef):
for class_statement in body_statement.body:
if isinstance(class_statement, ast.Assign):
check_assignment(class_statement)
else:
raise Exception(('unsupported path: %s' % path))
return docs
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
8d6b6341d97bf4d6bd372ca55ec0b55bb54141f4
|
33aeee667b5d55dfc48c39a96583b1eccf04961a
|
/P3python_dev/Demo/order_demo.py
|
005fc3ba980240b0df6bfebb9f7673f8853dc3f9
|
[] |
no_license
|
yangtingting123456/python-unittest-requests-htmlrunner
|
fc2cbec7f0b36e4aa1be905db0dcc34d32262e93
|
9f155cbf3fc25d68ba0420837d03b4d7ac71f2b6
|
refs/heads/master
| 2023-01-06T10:18:21.979337
| 2020-10-26T09:18:43
| 2020-10-26T09:18:43
| 307,299,936
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# Manually enter five numbers in the console
nums = input('Enter any five numbers, separated by spaces: ')
print(type(nums))
# convert the space-separated input into integers so the sort is numeric, not lexicographic
num = [int(x) for x in nums.split(' ')]
print(type(num))
num.sort()
print(num)
# list.sort() sorts in place and returns None, so use sorted() to get the reversed copy
num_list = sorted(num, reverse=True)
print('%s' % num_list)
|
[
"3048903923@qq.com"
] |
3048903923@qq.com
|
efb63d86ca30c1448d7cb0cab52ccbbac460ef26
|
7ce8670cc2b63a01aa4fa08ff040dc2ea2304c04
|
/ecommarce/migrations/0022_auto_20180926_0539.py
|
c3e042f2634dc3a6a8d79a19d4f3431c3ae2b8ce
|
[] |
no_license
|
Shamsulhaq/sonomm
|
bca793f21fbbb698a498159fc592063751099cd2
|
0d1fed65bfcc8829b80c018ca460ce198869a21f
|
refs/heads/master
| 2020-03-26T08:05:46.667063
| 2018-11-28T18:41:35
| 2018-11-28T18:41:35
| 138,379,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
# Generated by Django 2.1.1 on 2018-09-26 05:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommarce', '0021_auto_20180926_0535'),
]
operations = [
migrations.AlterField(
model_name='productbasic',
name='regular_price',
field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=9, null=True),
),
]
|
[
"bmshamsulhaq65@gmail.com"
] |
bmshamsulhaq65@gmail.com
|
e0b024e5b79c1d5f42ef9a23a54590144441cd5c
|
d2024f10e641ab2f28a888d23071edc032299498
|
/pacer/pacer.py
|
e0eca0bd7271916fc0daebeb3fbf4c3dc9da0b15
|
[] |
no_license
|
chen116/demo2018
|
6f2ae07150182b8e14a2eacbc57bdc79c03e6dee
|
d289545bcc30445be26e1381d5301d8f657d0c6e
|
refs/heads/master
| 2021-04-27T13:11:44.742650
| 2018-07-14T14:38:37
| 2018-07-14T14:38:37
| 122,435,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 734
|
py
|
import heartbeat
import host_guest_comm
import numpy as np
import time
window_size_hr=5
hb = heartbeat.Heartbeat(1024,window_size_hr,100,"vic.log",10,100)
monitoring_items = ["heart_rate","app_mode","frame_size","timeslice"]
comm = host_guest_comm.DomU(monitoring_items)
it = 1
matsize = 500
comm.write("app_mode", 4)
comm.write("frame_size", matsize)
for i in range(it*6):
# hb stuff
a= np.random.rand(matsize, matsize)
b= np.random.rand(matsize, matsize)
tn = time.time()
c= np.matmul(b,a.T)
print(time.time()-tn)
time.sleep(0.1)
hb.heartbeat_beat()
print(hb.get_instant_heartrate())
if i%window_size_hr==0:
comm.write("heart_rate", hb.get_window_heartrate())
comm.write("heart_rate", "done")
hb.heartbeat_finish()
|
[
"yvictorck@gmail.com"
] |
yvictorck@gmail.com
|
c5fc28a9aac563a1e87570944ad552bfb2e8010e
|
6ea83cee7623e2b1d28bb79d0e97645327627b0c
|
/old_code_before_revision_where_processed_google_sheet_csv_data_and_image_conversion_etc/hennur/pdf_generation/sub_elements/image.py
|
0a968642bfbf4c07c6528dd4d7de831eaa014877
|
[] |
no_license
|
santokalayil/Address_Directory_Generaation_-_ReportLab_Automation
|
6358134211f621a54c2734971947d52e22cce30a
|
ed00103c41aa358d5b3256ad0bfd5c4467ac231b
|
refs/heads/main
| 2023-07-19T01:22:49.959251
| 2021-08-21T18:19:00
| 2021-08-21T18:19:00
| 352,067,628
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
from reportlab.lib import colors
from pdf_generation.page_settings import *
from reportlab.platypus import Image, Table
class family_image:
def __init__(self, img_path):
self.img_path = img_path
def generate(self):
# pct = 0.9
img_width = (320 * 0.35) # * pct
img_height = (249 * 0.35) # * pct
img = Image(filename=self.img_path, width=img_width, height=img_height, )
img_table = Table(
data=[[img]],
colWidths=img_width,
rowHeights=img_height,
style=[
                # The two (0, 0) in each attribute represent the range
                # of table cells that the style applies to. Since there's only one cell at (0, 0),
                # it's used for both start and end of the range
('ALIGN', (0, 0), (0, 0), 'LEFT'),
# ('BOX', (0, 0), (0, 0), 2, colors.HexColor('#eeeeee')),
# The fourth argument to this style attribute is the border width
('VALIGN', (0, 0), (0, 0), 'MIDDLE'),
("TOPPADDING", (0, 0), (-1, -1), 0),
("BOTTOMPADDING", (0, 0), (-1, -1), 0),
("LEFTPADDING", (0, 0), (-1, -1), 0),
("RIGHTPADDING", (0, 0), (-1, -1), 0),
]
)
return img_table
|
[
"49450970+santokalayil@users.noreply.github.com"
] |
49450970+santokalayil@users.noreply.github.com
|