blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
916770e0875df4e7da900aeb509ad5902babdd8b | 3222a9f99dc04eae93442f27a49259f2c74320a9 | /uninas/model/heads/darts.py | bfc37c2e68c8752756436216fae2b2833ea125a8 | [
"MIT"
] | permissive | Light-Reflection/uninas | 55a754c997b6d7a48aa6ee93fc4f1211c9a35740 | b5727308b48b2fe399cc9b5d5732f3f5fd913b35 | refs/heads/main | 2023-02-20T11:48:06.815664 | 2021-01-25T09:42:43 | 2021-01-25T09:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | import torch
from uninas.model.heads.abstract import AbstractHead
from uninas.model.modules.abstract import AbstractModule
from uninas.utils.shape import Shape
from uninas.register import Register
import torch.nn as nn
class BasicDartsAuxHeadModule(nn.Module):
    """DARTS-style auxiliary classifier head.

    Shrinks the feature map with a 5x5 average pool, projects to 128 and
    then 768 channels, and classifies with a single linear layer. The
    Linear(768, ...) assumes the spatial size collapses to 1x1 after the
    2x2 convolution (e.g. an 8x8 input with init_pool_stride=3).
    """

    def __init__(self, c: int, num_classes: int, init_pool_stride: int):
        super().__init__()
        # Layer order matches the original DARTS auxiliary head.
        layers = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=init_pool_stride, padding=0, count_include_pad=False),
            nn.Conv2d(c, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*layers)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return class logits for a batch of feature maps."""
        feats = self.features(x)
        # flatten all non-batch dims before the linear classifier
        return self.classifier(torch.flatten(feats, 1))
class BasicDartsAuxHead(AbstractModule):
    """AbstractModule wrapper around BasicDartsAuxHeadModule.

    The torch module is created lazily in _build(), once the input shape
    (and therefore the channel count) is known.
    """

    def __init__(self, init_pool_stride: int):
        super().__init__()
        self.auxiliary = None  # BasicDartsAuxHeadModule, created in _build()
        # _add_to_kwargs evidently also binds the value as
        # self.init_pool_stride (used in _build below) -- behaviour of the
        # AbstractModule base class, not visible in this file
        self._add_to_kwargs(init_pool_stride=init_pool_stride)

    def _build(self, s_in: Shape, c_out: int) -> Shape:
        """Instantiate the head for input shape *s_in* and *c_out* classes."""
        self.auxiliary = BasicDartsAuxHeadModule(c=s_in.num_features(), num_classes=c_out,
                                                 init_pool_stride=self.init_pool_stride)
        # presumably probes the built module to derive its output shape
        # (AbstractModule API -- TODO confirm)
        return self.probe_outputs(s_in, multiple_outputs=False)

    def forward(self, x):
        return self.auxiliary(x)
@Register.network_head()
class DartsCifarAuxHead(AbstractHead):
    """
    CIFAR network auxiliary head as in DARTS
    """

    def set_dropout_rate(self, p=None):
        # delegated to the wrapped module (method provided by its base class,
        # not visible here)
        return self.head_module.set_dropout_rate(p)

    def _build(self, s_in: Shape, s_out: Shape) -> Shape:
        """ assuming input size 8x8 """
        # pool stride 3: the 5x5 avg-pool turns an 8x8 map into 2x2, which the
        # 2x2 conv inside BasicDartsAuxHeadModule then reduces to 1x1
        self.head_module = BasicDartsAuxHead(init_pool_stride=3)
        return self.head_module.build(s_in, s_out.num_features())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.head_module(x)
@Register.network_head()
class DartsImageNetAuxHead(DartsCifarAuxHead):
    """
    ImageNet network auxiliary head as in DARTS
    """

    def _build(self, s_in: Shape, s_out: Shape) -> Shape:
        """ assuming input size 14x14 """
        # NOTE(review): verify the pooled spatial size really collapses to 1x1
        # after the 2x2 conv -- the head's Linear expects exactly 768 features.
        self.head_module = BasicDartsAuxHead(init_pool_stride=2)
        return self.head_module.build(s_in, s_out.num_features())
| [
"kevin.laube@uni-tuebingen.de"
] | kevin.laube@uni-tuebingen.de |
6f39418b10650d551820ea2a5e3c5c1f12f7d297 | ffbf33d8486a49c77152d814a2217620643f8ce1 | /example.py | 540b76aba22b847f4c3298421bec5b03ea063225 | [] | no_license | codesharedot/dentacoin-price-gnome-ticket | fad8c6ea24a152467bc518e5d466c1684dc6183e | 869eb92c9b3871f3bc79b3a0c5c792f52792b4a7 | refs/heads/master | 2020-08-21T07:44:30.869487 | 2019-10-18T21:55:26 | 2019-10-18T21:55:26 | 216,112,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/env python
# rename to dentacoin.1r.60s.py and put in ~.config/argos
import re
from gi.repository import Gio
from datetime import datetime
import time
import json
import requests
def coin():
    """Fetch the current Dentacoin price in USD from CoinMarketCap.

    Returns the price rounded to 2 decimal places, as a float.
    NOTE(review): the coinmarketcap.com v1 ticker endpoint is long
    deprecated; the URL is kept unchanged, but expect failures until the
    script is migrated to a current API.
    """
    # timeout so a hung endpoint cannot block this polling script forever
    data = requests.get("https://api.coinmarketcap.com/v1/ticker/dentacoin/",
                        timeout=10)
    price = data.json()[0]["price_usd"]
    # round() replaces the original float("{0:.2f}".format(...)) round-trip
    return round(float(price), 2)
# Emit one update in Argos panel format: title line, "---", then menu rows
# (see the rename hint in the header comment).
usd = float(coin())
lastupdate = datetime.now().strftime("%H:%M:%S")  # NOTE(review): only referenced by the commented-out line below
#print(" $" + str(usd) + " (" + str(lastupdate) + ") | iconName=invest-applet")
print("dentacoin $" + str(usd) + " | iconName=invest-applet")
print("---")
#print("Kraken: $" + str(usd) + " | iconName=gedit bash=gedit terminal=false")
print("---")
| [
"codeto@sent.com"
] | codeto@sent.com |
d0fe769b3f739e25981e583fdbcb874d4c996e23 | 446d9c9e98bac9bb7d6ba9d6f2639fd1ab0e68af | /pythonBook/chapter08/exercise8-45.py | 8a6d1acb304c0904f22d4d0c98285dad19176d3d | [] | no_license | thiagofb84jp/python-exercises | 062d85f4f95332549acd42bf98de2b20afda5239 | 88ad7365a0f051021034ac6f0683b3df2de57cdb | refs/heads/main | 2023-07-19T21:15:08.689041 | 2021-08-17T10:59:09 | 2021-08-17T10:59:09 | 308,311,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | # 8.45. Imprimindo os elementos de uma lista
# indentation step (in spaces) added per nesting level
ESPACOS_POR_NIVEL = 4

def imprimeElementos(l, nivel=0):
    """Recursively print the elements of a (possibly nested) list.

    Each nesting level is indented by ESPACOS_POR_NIVEL extra spaces, and
    list boundaries are printed as "[" / "]" lines.

    l     -- a list (possibly nested) or a single scalar value
    nivel -- current nesting depth (0 for the outermost call)
    """
    espacos = ' ' * ESPACOS_POR_NIVEL * nivel
    # isinstance() also accepts list subclasses, which the original
    # type(l) == list check would wrongly print as scalars
    if isinstance(l, list):
        print(espacos, "[")
        for e in l:
            imprimeElementos(e, nivel + 1)
        print(espacos, "]")
    else:
        print(espacos, l)
L = [1, [2, 3, 4, [5, 6, 7]]]  # sample nested list
imprimeElementos(L)  # demo: prints the structure, indenting 4 spaces per level
| [
"thiagofb84jp@gmail.com"
] | thiagofb84jp@gmail.com |
56820791ef6465af1280c99946f90aa563aa1085 | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/core/migrations/0028_auto_20180609_1122.py | 3cfe08f42b6976a4f54d3b2fbf6d1522a5943236 | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,855 | py | # Generated by Django 2.0.5 on 2018-06-09 02:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (see the "Generated by" header).

    Adds VendorBranch.google_credentials and re-declares several nullable
    auto-message foreign keys. Do not hand-edit beyond comments.
    """

    dependencies = [
        ('core', '0027_auto_20180609_1100'),
    ]

    operations = [
        # new nullable text column for Google credentials
        migrations.AddField(
            model_name='vendorbranch',
            name='google_credentials',
            field=models.TextField(null=True, verbose_name='Google認証'),
        ),
        migrations.AlterField(
            model_name='automessagecontroller',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagecontroller_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagetrigger',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagetrigger_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
    ]
| [
"phonehtetpaing1221@gmail.com"
] | phonehtetpaing1221@gmail.com |
35d8138d7d2c4d5933446c2113970509bc84e41c | 5abb52c3ee859ee5876601025479e9d3214f829f | /meiduo/meiduo/utils/models.py | bf7a320e05ea579c837b00f9a19d711fe9043645 | [] | no_license | RapperDream/meiduo-18 | 05ca46628f5575b31d6a0b2115786dd3f0e57f5a | d7f5aad879f0e420ac16e577d107236bdec816ee | refs/heads/master | 2020-04-22T02:39:01.099998 | 2019-02-23T14:53:39 | 2019-02-23T14:53:39 | 170,057,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from django.db import models
class BaseModel(models.Model):
    """
    Abstract base adding audit timestamp fields to model classes.
    (Original Chinese docstring: "supplement fields for model classes".)
    """
    create_time = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")  # creation time, set once on insert
    update_time = models.DateTimeField(auto_now=True, verbose_name="更新时间")  # update time, refreshed on every save

    class Meta:
        abstract = True  # abstract model class: inherit-only; no table is created during DB migration
| [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
d5a3ec9ce37ad51f512af4b6d0789ab415b5ba1a | 975b2d421d3661e6770b601929d5f11d981d8985 | /msgraph/generated/models/simulation_automation_status.py | 92c255d18fe113a1bab8d5c3c7fd789f10db03ca | [
"MIT"
] | permissive | microsoftgraph/msgraph-sdk-python | a7c551b85daadeebf76ec4ae12668664ea639b42 | 27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949 | refs/heads/main | 2023-09-03T21:45:27.989672 | 2023-08-31T06:22:18 | 2023-08-31T06:22:18 | 534,665,999 | 135 | 18 | MIT | 2023-09-14T11:04:11 | 2022-09-09T14:00:17 | Python | UTF-8 | Python | false | false | 247 | py | from enum import Enum
class SimulationAutomationStatus(str, Enum):
    """Status values for an attack-simulation automation run.

    str-mixin enum: each member compares equal to its wire value, so it can
    be used directly where the API expects the plain string.
    """

    Unknown = "unknown"
    Draft = "draft"
    NotRunning = "notRunning"
    Running = "running"
    Completed = "completed"
    # sentinel for values added to the API after this SDK was generated
    UnknownFutureValue = "unknownFutureValue"
| [
"GraphTooling@service.microsoft.com"
] | GraphTooling@service.microsoft.com |
d8721c5cbeafc2e9c1c1252860e2f8b37f17a391 | c2b0ee4ef8fb8e933966c7219a6cac6484ce03fb | /lib/python3.7/site-packages/storages/backends/apache_libcloud.py | ccbab93fa76a916cf01260f9bb1f3a9b98d4e080 | [
"MIT"
] | permissive | dukuaris/Django | 0b40e79d9e461c28064a83cc42d7710b49b43a19 | d34f3e3f09028511e96b99cae7faa1b46458eed1 | refs/heads/master | 2022-12-09T04:05:09.329256 | 2020-03-21T02:17:20 | 2020-03-21T02:17:20 | 236,935,131 | 0 | 0 | MIT | 2022-12-08T01:51:39 | 2020-01-29T08:24:36 | Python | UTF-8 | Python | false | false | 7,203 | py | # Django storage using libcloud providers
# Aymeric Barantal (mric at chamal.fr) 2011
#
import io
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import File
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from django.utils.six import string_types
from django.utils.six.moves.urllib.parse import urljoin
try:
from libcloud.storage.providers import get_driver
from libcloud.storage.types import ObjectDoesNotExistError, Provider
except ImportError:
raise ImproperlyConfigured("Could not load libcloud")
@deconstructible
class LibCloudStorage(Storage):
    """Django storage derived class using apache libcloud to operate
    on supported providers"""

    def __init__(self, provider_name=None, option=None):
        # NOTE(review): ``option`` is accepted but never used.
        if provider_name is None:
            provider_name = getattr(settings, 'DEFAULT_LIBCLOUD_PROVIDER', 'default')
        # provider config dict: expects keys 'type', 'user', 'key', 'bucket'
        # and optionally 'region' / 'project'
        self.provider = settings.LIBCLOUD_PROVIDERS.get(provider_name)
        if not self.provider:
            raise ImproperlyConfigured(
                'LIBCLOUD_PROVIDERS %s not defined or invalid' % provider_name)
        extra_kwargs = {}
        if 'region' in self.provider:
            extra_kwargs['region'] = self.provider['region']
        # Used by the GoogleStorageDriver
        if 'project' in self.provider:
            extra_kwargs['project'] = self.provider['project']
        try:
            provider_type = self.provider['type']
            if isinstance(provider_type, string_types):
                # accept a dotted path like 'libcloud.storage.types.Provider.S3'
                module_path, tag = provider_type.rsplit('.', 1)
                if module_path != 'libcloud.storage.types.Provider':
                    raise ValueError("Invalid module path")
                provider_type = getattr(Provider, tag)
            Driver = get_driver(provider_type)
            self.driver = Driver(
                self.provider['user'],
                self.provider['key'],
                **extra_kwargs
            )
        except Exception as e:
            raise ImproperlyConfigured(
                "Unable to create libcloud driver type %s: %s" %
                (self.provider.get('type'), e))
        self.bucket = self.provider['bucket']  # Limit to one container

    def _get_bucket(self):
        """Helper to get bucket object (libcloud container)"""
        return self.driver.get_container(self.bucket)

    def _clean_name(self, name):
        """Clean name (windows directories)"""
        return os.path.normpath(name).replace('\\', '/')

    def _get_object(self, name):
        """Get object by its name. Return None if object not found"""
        clean_name = self._clean_name(name)
        try:
            return self.driver.get_object(self.bucket, clean_name)
        except ObjectDoesNotExistError:
            return None

    def delete(self, name):
        """Delete object on remote"""
        obj = self._get_object(name)
        if obj:
            return self.driver.delete_object(obj)
        else:
            raise Exception('Object to delete does not exists')

    def exists(self, name):
        """Return True if an object with this name exists in the bucket."""
        obj = self._get_object(name)
        return bool(obj)

    def listdir(self, path='/'):
        """Lists the contents of the specified path,
        returning a 2-tuple of lists; the first item being
        directories, the second item being files.
        """
        container = self._get_bucket()
        objects = self.driver.list_container_objects(container)
        path = self._clean_name(path)
        if not path.endswith('/'):
            path = "%s/" % path
        files = []
        dirs = []
        # TOFIX: better algorithm to filter correctly
        # (and not depend on google-storage empty folder naming)
        for o in objects:
            if path == '/':
                # top level: names without '/' are files; one-level names
                # contribute their prefix as a directory
                if o.name.count('/') == 0:
                    files.append(o.name)
                elif o.name.count('/') == 1:
                    dir_name = o.name[:o.name.index('/')]
                    if dir_name not in dirs:
                        dirs.append(dir_name)
            elif o.name.startswith(path):
                if o.name.count('/') <= path.count('/'):
                    # TOFIX : special case for google storage with empty dir
                    if o.name.endswith('_$folder$'):
                        # strip the 9-char '_$folder$' marker and the prefix
                        name = o.name[:-9]
                        name = name[len(path):]
                        dirs.append(name)
                    else:
                        name = o.name[len(path):]
                        files.append(name)
        return (dirs, files)

    def size(self, name):
        """Size of the object in bytes, or -1 if it does not exist."""
        obj = self._get_object(name)
        return obj.size if obj else -1

    def url(self, name):
        """Public URL for *name*: CDN URL if the driver supports it,
        otherwise a provider-specific URL built from well-known hosts.
        Returns None for a missing object."""
        provider_type = self.provider['type'].lower()
        obj = self._get_object(name)
        if not obj:
            return None
        try:
            url = self.driver.get_object_cdn_url(obj)
        except NotImplementedError as e:
            object_path = '%s/%s' % (self.bucket, obj.name)
            if 's3' in provider_type:
                base_url = 'https://%s' % self.driver.connection.host
                url = urljoin(base_url, object_path)
            elif 'google' in provider_type:
                url = urljoin('https://storage.googleapis.com', object_path)
            elif 'azure' in provider_type:
                base_url = ('https://%s.blob.core.windows.net' %
                            self.provider['user'])
                url = urljoin(base_url, object_path)
            else:
                # unknown provider: re-raise the original NotImplementedError
                raise e
        return url

    def _open(self, name, mode='rb'):
        remote_file = LibCloudFile(name, self, mode=mode)
        return remote_file

    def _read(self, name):
        obj = self._get_object(name)
        # TOFIX : we should be able to read chunk by chunk
        # NOTE(review): next() takes only the FIRST chunk of the stream --
        # verify this returns the whole object for large files.
        return next(self.driver.download_object_as_stream(obj, obj.size))

    def _save(self, name, file):
        self.driver.upload_object_via_stream(iter(file), self._get_bucket(), name)
        return name
class LibCloudFile(File):
    """File inherited class for libcloud storage objects read and write"""

    def __init__(self, name, storage, mode):
        self.name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False  # set by write(); triggers upload on close()
        self._file = None  # lazily-filled in-memory buffer

    def _get_file(self):
        # download the whole object into memory on first access
        if self._file is None:
            data = self._storage._read(self.name)
            self._file = io.BytesIO(data)
        return self._file

    def _set_file(self, value):
        self._file = value

    file = property(_get_file, _set_file)

    @property
    def size(self):
        # cached after the first remote lookup
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self.name)
        return self._size

    def read(self, num_bytes=None):
        return self.file.read(num_bytes)

    def write(self, content):
        # NOTE(review): this replaces the ENTIRE buffer with ``content``
        # rather than writing at the current position -- confirm callers
        # always write the full file in one call.
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = io.BytesIO(content)
        self._is_dirty = True

    def close(self):
        # upload the buffer only if it was modified via write()
        if self._is_dirty:
            self._storage._save(self.name, self.file)
        self.file.close()
| [
"dukuaris@gmail.com"
] | dukuaris@gmail.com |
f0a4547e6a232f3f11bd3512475eab4acf560898 | 4c3e2557044884be630d3c6c47c3e446f951c681 | /Contest/ABC213/D.py | c9d5a6af81fab8ec26f50fef91b0b88fb51d270e | [] | no_license | monda00/AtCoder | 01bdf89338c22f1792fde7f85728e01d97e5fd34 | abf947f2cdfe87486ad8935ba078918d4809573a | refs/heads/master | 2021-11-10T00:54:01.144582 | 2021-11-07T13:24:03 | 2021-11-07T13:24:03 | 186,128,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | import sys
sys.setrecursionlimit(300000)  # allow deep recursion for dfs() below
n = int(input())  # number of vertices (edges on the next n-1 lines)
G = [[] for _ in range(n+1)]  # adjacency lists, 1-indexed (index 0 unused)
for _ in range(n-1):
    a, b = map(int, input().split())
    G[a].append(b)
    G[b].append(a)
for i in range(n+1):
    G[i].sort()  # visit neighbours in ascending vertex order
ans = []  # visit order (Euler tour of the tree), filled by dfs()
def dfs(crr, pre):
    """Euler tour from *crr*: record the vertex, recurse into every
    neighbour except the parent *pre* (in sorted order), and record *crr*
    again after returning from each child. Appends to the module-level
    ``ans`` list."""
    ans.append(crr)
    for neighbour in G[crr]:
        if neighbour == pre:
            continue  # don't walk back up to the parent
        dfs(neighbour, crr)
        ans.append(crr)
dfs(1, -1)  # root the tour at vertex 1 (-1 = sentinel "no parent")
print(*ans)  # walk sequence, space-separated
| [
"monda0524@gmail.com"
] | monda0524@gmail.com |
bedd5181a1ba501bba3b0ec97635bac44371d927 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/007_exceptions/_exercises/templates/GoCongr/019_Trapping Constraints but Not Errors.py | c7877049499a055c7aded2725d5c48260e4f679e | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 261 | py | # # Trapping Constraints but Not Errors
# ___ f x
# a.. x < 0, 'x must be negative'
# r___ x ** 2
#
# f(1) # error
#
# ___ reciprocal x
# a.. x != 0 # A useless assert!
# r_ 1 / x # Python checks for zero automatically | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
1eb144c9a4e3a9f82739a3589ab27584f365abd5 | fa9ede7ec36f30bb7c84a1006cbfaa4339b7b457 | /version1.py | de3d222e841437c0bf7e2b2f3040df482a2dc276 | [] | no_license | pushkargodbole/FlightGearPlotter | b004ce0eef1ac999d9f14eea0fa443e176834f73 | ab99ec50aa307d90fe05d9a2ea680d66dbaf2aee | refs/heads/master | 2021-01-17T14:50:56.509786 | 2013-05-01T00:55:31 | 2013-05-01T00:55:31 | 44,732,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,418 | py | #This file uses socket communication to interface with FG and plot the data using pylab plot
from threading import Thread
from pylab import *
from string import strip, split
from time import *
from socket import *
import csv
class storedata(Thread):
    """Receive FlightGear telemetry datagrams over UDP and append them to
    alldata.csv (one space-separated record per datagram)."""

    def run(self):
        host = "localhost"
        port = 5500
        buf = 1048576  # max datagram size to read (1 MiB)
        addr = (host,port)
        UDPSock = socket(AF_INET,SOCK_DGRAM)
        UDPSock.bind(addr)
        # write the header row of column names
        alldata = open('alldata.csv','w')
        alldata.write(str("elapsedtime(sec) altitude(ft) heading(deg) roll(deg) pitch(deg) yaw(deg) sideslip(deg) rollrate(degps) pitchrate(degps) yawrate(degps) alpha(deg) airspeed(kt) mach verticalspeed(fps)"))
        alldata.write(str('\n'))
        while 1:
            # NOTE(review): the file is re-opened for append on EVERY datagram
            # and never explicitly closed -- relies on refcount finalization.
            alldata = open('alldata.csv','a')
            data,addr = UDPSock.recvfrom(buf)
            data = data.replace('"',' ')  # replace double quotes with spaces
            alldata.write(str(data))
        UDPSock.close()  # NOTE(review): unreachable -- the loop never exits
# One list per telemetry column; retrive() fills these from alldata.csv and
# graph() plots them.
element = []
element.append([]) # elapsedtime
element.append([]) # altitude
element.append([]) # heading
element.append([]) # roll
element.append([]) # pitch
element.append([]) # yaw
element.append([]) # sideslip
element.append([]) # rollrate
element.append([]) # pitchrate
element.append([]) # yawrate
element.append([]) # alpha
element.append([]) # airspeed
element.append([]) # mach
element.append([]) # verticalspeed
class retrive(Thread):
    """Tail alldata.csv and parse each full record into the 14 per-column
    lists in ``element``."""

    def run(self):
        # reset all 14 columns in place (keeps the list objects shared
        # with the plotting thread)
        for n in range(0,14):
            del element[n][:]
        getdata = open('alldata.csv','r')
        line = str(getdata.readline())  # consume the header line
        while(1):
            line = str(getdata.readline())
            if(len(line) > 5):
                fields = line.split()
                if(len(fields) == 14):  # skip short/partial records
                    for i in range(0,14):
                        element[i].append(float(fields[i]))
        # NOTE(review): at EOF readline() returns '' immediately, so this
        # loop busy-spins while waiting for new data.
class graph(Thread):
    """Continuously re-plot elapsedtime (column 0) vs altitude (column 1)
    from ``element``, scrolling the window forward over time."""

    def run(self):
        x = 0  # index of the first sample shown; advanced to scroll the plot
        y = 0  # NOTE(review): unused
        while(1):
            sleep(0.1)
            clf() # this is important because otherwise the previous plot also remains and then you dont get a dynamically shifting plot.
            plot(element[0][x:],element[1][x:])
            xmin, xmax = xlim()
            ymin, ymax = ylim()
            xlim(xmin, xmax)
            ylim(ymin, ymax)
            show()
            draw()
            x += 3  # drop 3 samples from the left each refresh
# Start the three worker threads: UDP logger, CSV reader, plotter.
store_data = storedata()
store_data.start()
retrive = retrive()  # NOTE(review): rebinds the class name to the instance
retrive.start()
graph = graph()  # same here -- the class cannot be instantiated again after this
graph.start()
| [
"none@none"
] | none@none |
ba46f4574a9026aa0e2c436c325cf9a5acddfd9a | 167c6226bc77c5daaedab007dfdad4377f588ef4 | /python/ql/test/library-tests/PointsTo/new/code/d_globals.py | 72a063b2a75f67e3bb19f2bda666bb531a728045 | [
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | github/codeql | 1eebb449a34f774db9e881b52cb8f7a1b1a53612 | d109637e2d7ab3b819812eb960c05cb31d9d2168 | refs/heads/main | 2023-08-20T11:32:39.162059 | 2023-08-18T14:33:32 | 2023-08-18T14:33:32 | 143,040,428 | 5,987 | 1,363 | MIT | 2023-09-14T19:36:50 | 2018-07-31T16:35:51 | CodeQL | UTF-8 | Python | false | false | 1,419 | py |
def j():
return tuple, dict
dict
dict = 7
dict
tuple = tuple
tuple
#Global assignment in local scope
g1 = None
def assign_global():
global g1
g1 = 101
return g1 # Cannot be None
#Assignment in local scope, but called from module level
g2 = None
def init():
global g2
g2 = 102
init()
g2 # Cannot be None
#Global set in init method
g3 = None
class Ugly(object):
def __init__(self):
global g3
g3 = 103
def meth(self):
return g3 # Cannot be None
#Redefine
x = 0
x = 1
x
if cond:
x = 3
if other_cond:
y = 1
else:
y = 2
if cond3:
pass
else:
pass
y
v3
class X(object):
y = y
v4 = v3
X # Undefined
g3
type
def k(arg):
type
g4 = None
def get_g4():
if not g4:
set_g4()
return g4 # Cannot be None
def set_g4():
set_g4_indirect()
def set_g4_indirect():
global g4
g4 = False
class modinit(object): #ODASA-5486
global z
z = 0
del modinit
#ODASA-4688
def outer():
def inner():
global glob
glob = 100
return glob
def otherInner():
return glob
inner()
def redefine():
global z, glob
z
z = 1
z
glob
glob = 50
glob
class D(object):
def __init__(self):
pass
def foo(self):
return dict
def use_list_attribute():
l = []
list.append(l, 0)
return l
| [
"mark@hotpy.org"
] | mark@hotpy.org |
6005b8869165ad3d3159ea1c6b46b400f44fdd8c | 7d8cceea2d82e5069dd1f5f99ae487b07e977a2a | /mqtt_to_redis/app.py | eaf30d272cb8dbf21d238acc4bcab72fa7c56b0f | [] | no_license | thingsroot/user_apps_example | af8e34d6741e4def0327adcc3963880ee5425dfc | 708bdea217f274f27000e50c753bf14d3839c674 | refs/heads/master | 2020-03-29T08:51:11.565486 | 2019-10-18T02:31:55 | 2019-10-18T02:31:55 | 149,729,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,149 | py |
from __future__ import unicode_literals
import re
import os
import sys
import json
import redis
import logging
from configparser import ConfigParser
import paho.mqtt.client as mqtt
# Log everything to stdout; errors additionally go to stderr.
# NOTE(review): the stdout handler is DEBUG-level, so ERROR records are
# emitted by both handlers (i.e. duplicated) -- confirm this is intended.
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(logging.DEBUG)
console_err = logging.StreamHandler(sys.stderr)
console_err.setLevel(logging.ERROR)
logging_handlers = [console_out, console_err]
logging_format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
logging_datefmt = '%a, %d %b %Y %H:%M:%S'
logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt=logging_datefmt, handlers=logging_handlers)

# Broker and redis settings, read relative to the working directory.
config = ConfigParser()
config.read('../config.ini')
mqtt_host = config.get('mqtt', 'host', fallback='127.0.0.1')
mqtt_port = config.getint('mqtt', 'port', fallback=1883)
mqtt_user = config.get('mqtt', 'user', fallback='root')
mqtt_password = config.get('mqtt', 'password', fallback='root')
mqtt_keepalive = config.getint('mqtt', 'keepalive', fallback=60)
redis_srv_url = config.get('redis', 'url', fallback='redis://127.0.0.1:6379')

# One redis logical database per concern:
redis_sts = redis.Redis.from_url(redis_srv_url + "/9", decode_responses=True)  # device status (online or offline)
redis_cfg = redis.Redis.from_url(redis_srv_url + "/10", decode_responses=True)  # device defines
redis_rel = redis.Redis.from_url(redis_srv_url + "/11", decode_responses=True)  # device relationship
redis_rtdb = redis.Redis.from_url(redis_srv_url + "/12", decode_responses=True)  # device real-time data

''' Set all data be expired after device offline '''
redis_offline_expire = 3600 * 24 * 7  # 7 days, in seconds

# Topics are "<device-id>/<kind>": group 1 = device id, group 2 = kind.
match_topic = re.compile(r'^([^/]+)/(.+)$')
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: (re)subscribe to the device topics.

    Subscribing here means the subscriptions are renewed automatically
    whenever the client reconnects. A non-zero result code indicates the
    connection failed, in which case nothing is subscribed.
    """
    logging.info("Main MQTT Connected with result code " + str(rc))
    if rc != 0:
        return
    logging.info("Main MQTT Subscribe topics")
    # one single-level wildcard per topic family: "<device-id>/<kind>"
    for topic in ("+/data", "+/device", "+/status", "+/event"):
        client.subscribe(topic)
# Disconnect callback: only logs -- reconnection is handled by the
# loop_forever(retry_first_connection=True) call at the bottom of this file.
def on_disconnect(client, userdata, rc):
    logging.error("Main MQTT Disconnect with result code "+str(rc))
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Route an MQTT message into redis based on its topic kind.

    Topic format is "<device-id>/<kind>" with kind in
    {data, device, status, event}. Payloads are UTF-8 JSON
    (schemas inferred from the accesses below -- confirm against publishers).
    """
    g = match_topic.match(msg.topic)
    if not g:
        return
    g = g.groups()
    if len(g) < 2:
        return
    devid = g[0]
    topic = g[1]
    if topic == 'data':
        # real-time sample: {"input": <name>, "data": <value>}
        if msg.retain != 0:
            return  # ignore retained (stale) samples
        payload = json.loads(msg.payload.decode('utf-8', 'surrogatepass'))
        if not payload:
            logging.warning('Decode DATA JSON Failure: %s/%s\t%s', devid, topic, msg.payload.decode('utf-8', 'surrogatepass'))
            return
        # logging.debug('device: %s\tInput: %s\t Value: %s', g[0], g[1], json.dumps(payload))
        r = redis_rtdb.hmset(devid, {
            payload['input']: json.dumps(payload['data'])
        })
        return
    if topic == 'device':
        # device announcement: {"gate": <gateway-id>, "info": {...}}
        data = msg.payload.decode('utf-8', 'surrogatepass')
        logging.debug('%s/%s\t%s', devid, topic, data)
        info = json.loads(data)
        if not info:
            logging.warning('Decode JSON Failure: %s/%s\t%s', devid, topic, data)
            return
        gateid = info['gate']
        # rebuild the gateway's device list with this device included,
        # de-duplicated via a set
        devkeys = set(redis_rel.lrange(gateid, 0, 1000))
        redis_rel.ltrim(gateid, 0, -1000)
        devkeys.add(devid)
        for key in devkeys:
            redis_rel.lpush(gateid, key)
        # the device is alive again: clear any pending expirations
        redis_rtdb.persist(devid)
        redis_cfg.persist(devid)
        redis_cfg.set(devid, json.dumps(info['info']))
        redis_rel.persist('PARENT_{0}'.format(devid))
        redis_rel.set('PARENT_{0}'.format(devid), gateid)
        return
    if topic == 'status':
        # gateway status: {"status": "OFFLINE" | ...}
        gateid = devid
        data = json.loads(msg.payload.decode('utf-8', 'surrogatepass'))
        if not data:
            logging.warning('Decode JSON Failure: %s/%s\t%s', devid, topic, data)
            return
        status = data['status']
        redis_sts.set(gateid, status)
        if status == 'OFFLINE':
            # expire the gateway and every child device after 7 days offline
            redis_sts.expire(gateid, redis_offline_expire)
            redis_rel.expire(gateid, redis_offline_expire)
            devkeys = redis_rel.lrange(gateid, 0, 1000)
            for dev in devkeys:
                redis_cfg.expire(dev, redis_offline_expire)
                redis_rtdb.expire(dev, redis_offline_expire)
                redis_rel.expire('PARENT_{0}'.format(dev), redis_offline_expire)
        else:
            redis_sts.persist(gateid)
            redis_rel.persist(gateid)
        return
    if topic == 'event':
        # events are decoded but currently discarded
        data = json.loads(msg.payload.decode('utf-8', 'surrogatepass'))
        return
# Listen on MQTT forwarding real-time data into redis, and forwarding configuration to frappe.
client = mqtt.Client(client_id="THINGSROOT_MQTT_TO_REDIS")
client.username_pw_set(mqtt_user, mqtt_password)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
try:
    logging.debug('MQTT Connect to %s:%d', mqtt_host, mqtt_port)
    client.connect_async(mqtt_host, mqtt_port, mqtt_keepalive)
    # Blocking call that processes network traffic, dispatches callbacks and
    # handles reconnecting.
    # Other loop*() functions are available that give a threaded interface and a
    # manual interface.
    client.loop_forever(retry_first_connection=True)
except Exception as ex:
    # any unhandled failure is fatal for this worker; exit non-zero so a
    # supervisor can restart it
    logging.exception(ex)
    os._exit(1)
| [
"dirk@kooiot.com"
] | dirk@kooiot.com |
538eb464d379367267c75e13537743772642c15e | 2509936d814fb6cdd283c2549c518c8dfad9450c | /api/cases/migrations/0027_auto_20200522_0945.py | df345fb09bb3bc3809080ce3634bdf014159b5ae | [
"MIT"
] | permissive | uktrade/lite-api | 19f829119fa96de3f4862eb233845508b0fef7eb | b35792fc981220285ed9a7b3659aba460f1b207a | refs/heads/dev | 2023-08-25T10:11:17.594001 | 2023-08-24T14:24:43 | 2023-08-24T14:24:43 | 172,914,199 | 4 | 3 | MIT | 2023-09-14T17:36:47 | 2019-02-27T12:46:22 | Python | UTF-8 | Python | false | false | 1,350 | py | # Generated by Django 2.2.12 on 2020-05-22 09:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (see the "Generated by" header).

    Re-declares CaseType.reference with the current set of case-type
    reference codes. Do not hand-edit beyond comments.
    """

    dependencies = [
        ("cases", "0026_case_submitted_by"),
    ]

    operations = [
        migrations.AlterField(
            model_name="casetype",
            name="reference",
            field=models.CharField(
                # short code -> human-readable licence/clearance/query name
                choices=[
                    ("oiel", "Open Individual Export Licence"),
                    ("ogel", "Open General Export Licence"),
                    ("oicl", "Open Individual Trade Control Licence"),
                    ("siel", "Standard Individual Export Licence"),
                    ("sicl", "Standard Individual Trade Control Licence"),
                    ("sitl", "Standard Individual Transhipment Licence"),
                    ("f680", "MOD F680 Clearance"),
                    ("exhc", "MOD Exhibition Clearance"),
                    ("gift", "MOD Gifting Clearance"),
                    ("cre", "HMRC Query"),
                    ("gqy", "Goods Query"),
                    ("eua", "End User Advisory Query"),
                    ("ogtcl", "Open General Trade Control Licence"),
                    ("ogtl", "Open General Transhipment Licence"),
                ],
                max_length=5,
                unique=True,
            ),
        ),
    ]
| [
"noreply@github.com"
] | uktrade.noreply@github.com |
8e56ea5dfa85279b41b4265d552d820cf2397392 | c1f60f28cbd74a639dc89b22518ae33765267af4 | /foundationsync/setup.py | 8d05fc1483b487c168c522c41a87df11c461aad3 | [
"MIT"
] | permissive | DalavanCloud/indico-plugins-cern | b0a97dbbd7fb4dc272977b121ec92931ee316ad7 | bb67d2fb9e3d24faeeff2b78a5e9bcff52ac5f26 | refs/heads/master | 2020-04-22T08:58:17.025951 | 2019-01-21T14:25:08 | 2019-01-21T14:25:54 | 170,255,750 | 1 | 0 | null | 2019-02-12T05:08:40 | 2019-02-12T05:08:40 | null | UTF-8 | Python | false | false | 998 | py | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2018 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from setuptools import setup
# Packaging metadata for the CERN Indico FoundationSync plugin.
setup(
    name='indico-plugin-foundationsync',
    version='3.0-dev',
    url='https://github.com/indico/indico-plugins-cern',
    license='MIT',
    author='Indico Team',
    author_email='indico-team@cern.ch',
    py_modules=('indico_foundationsync',),
    zip_safe=False,
    install_requires=[
        'indico>=2.2.dev0',
        'cx_Oracle',  # Oracle DB client library
    ],
    entry_points={
        # registers the plugin class with Indico's plugin discovery
        'indico.plugins': {'foundationsync = indico_foundationsync:FoundationSyncPlugin'}
    },
    classifiers=[
        'Environment :: Plugins',
        'Environment :: Web Environment',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
    ],
)
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
b166dad158b8822a8e6bdf7c8971568c9aa3df98 | f566dfc5ce189d30696b9bf8b7e8bf9b1ef45614 | /Example/TDDDPGExample/StabilizerTwoD/TDDDPG_StabilizerTwoDContinuous.py | 916d38cf04314dd7c1c33c06e6750203b8a36eb7 | [] | no_license | yangyutu/DeepReinforcementLearning-PyTorch | 3dac4ad67fa3a6301d65ca5c63532f2a278e21d7 | 7af59cb883e24429d42a228584cfc96c42f6d35b | refs/heads/master | 2022-08-16T13:46:30.748383 | 2022-07-30T05:47:47 | 2022-07-30T05:47:47 | 169,829,723 | 12 | 6 | null | null | null | null | UTF-8 | Python | false | false | 5,453 | py | from Agents.TDDDPG.TDDDPG import TDDDPGAgent
from Env.CustomEnv.StablizerTwoD import StablizerTwoDContinuous
from utils.netInit import xavier_init
import json
from torch import optim
from copy import deepcopy
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import torch
torch.manual_seed(1)
class Critic(nn.Module):
    """State-action value network: Q(state, action) -> scalar.

    Two hidden ReLU layers over the concatenated (state, action) vector,
    followed by a single linear output. Weights are Xavier-initialized.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        # input_size must equal state_dim + action_dim (inputs are concatenated)
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, 1)
        self.apply(xavier_init)

    def forward(self, state, action):
        """Return Q-values for a batch of (state, action) torch tensors."""
        joint = torch.cat([state, action], 1)
        hidden = F.relu(self.linear2(F.relu(self.linear1(joint))))
        return self.linear3(hidden)
class Actor(nn.Module):
    """Deterministic policy network mapping states to tanh-bounded actions.

    Two hidden ReLU layers followed by a tanh output, so every action
    component lies in [-1, 1]. Weights are Xavier-initialized.
    """

    def __init__(self, input_size, hidden_size, output_size, learning_rate=3e-4):
        # NOTE(review): learning_rate is accepted but never used here (the
        # optimizer is built externally); kept for interface compatibility.
        super().__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, output_size)
        self.apply(xavier_init)

    def forward(self, state):
        """Return actions in [-1, 1] for a batch of state tensors."""
        hidden = F.relu(self.linear2(F.relu(self.linear1(state))))
        return torch.tanh(self.linear3(hidden))

    def select_action(self, state, noiseFlag=False):
        """Policy action for *state*; noiseFlag is ignored (no noise here)."""
        return self.forward(state)
def plotPolicy(x, policy):
    # Simple 1-D visualisation: plot the policy output against x.
    # (The commented block below is a leftover discrete-action variant.)
    plt.plot(x, policy)
    # for i in range(nbActions):
    #     idx, idy = np.where(policy == i)
    #     plt.plot(idx,idy, )
# First construct the neural networks and the training configuration.
config = dict()
config['trainStep'] = 1500
config['targetNetUpdateStep'] = 100
config['memoryCapacity'] = 20000
config['trainBatchSize'] = 64
config['gamma'] = 0.9
config['tau'] = 0.01
config['actorLearningRate'] = 0.001
config['criticLearningRate'] = 0.001
config['netGradClip'] = 1
config['logFlag'] = False
config['logFileName'] = 'StabilizerOneDLog/traj'
config['logFrequency'] = 1000
config['episodeLength'] = 200
env = StablizerTwoDContinuous()
N_S = env.stateDim
N_A = env.nbActions
netParameter = dict()
netParameter['n_feature'] = N_S
netParameter['n_hidden'] = 100
netParameter['n_output'] = N_A
actorNet = Actor(netParameter['n_feature'],
                 netParameter['n_hidden'],
                 netParameter['n_output'])
actorTargetNet = deepcopy(actorNet)
# TD3 uses twin critics; each gets its own target copy.
criticNet = Critic(netParameter['n_feature'] + N_A,
                   netParameter['n_hidden'])
criticNetTwo = deepcopy(criticNet)
criticTargetNet = deepcopy(criticNet)
criticTargetNetTwo = deepcopy(criticNet)
actorOptimizer = optim.Adam(actorNet.parameters(), lr=config['actorLearningRate'])
criticOptimizer = optim.Adam(criticNet.parameters(), lr=config['criticLearningRate'])
criticOptimizerTwo = optim.Adam(criticNetTwo.parameters(), lr=config['criticLearningRate'])
actorNets = {'actor': actorNet, 'target': actorTargetNet}
criticNets = {'criticOne': criticNet, 'criticTwo': criticNetTwo, 'targetOne': criticTargetNet, 'targetTwo': criticTargetNetTwo}
optimizers = {'actor': actorOptimizer, 'criticOne':criticOptimizer, 'criticTwo': criticOptimizerTwo}
agent = TDDDPGAgent(config, actorNets, criticNets, env, optimizers, torch.nn.MSELoss(reduction='mean'), N_A)
# Evaluate the untrained policy/value on a 100x100 state grid and dump to disk
# so the before/after behaviour can be compared.
xSet = np.linspace(-4,4,100)
ySet = np.linspace(-4,4,100)
policyX = np.zeros((100, 100))
policyY = np.zeros((100, 100))
value = np.zeros((100, 100))
for i, x in enumerate(xSet):
    for j, y in enumerate(ySet):
        state = torch.tensor([x, y], dtype=torch.float32).unsqueeze(0)
        action = agent.actorNet.select_action(state, noiseFlag = False)
        value[i, j] = agent.criticNetOne.forward(state, action).item()
        action = action.detach().numpy()
        policyX[i, j] = action[0][0]
        policyY[i, j] = action[0][1]
np.savetxt('StabilizerPolicyXBeforeTrain.txt', policyX, fmt='%f')
np.savetxt('StabilizerPolicyYBeforeTrain.txt', policyY, fmt='%f')
np.savetxt('StabilizerValueBeforeTrain.txt', value, fmt='%f')
agent.train()
def customPolicy(state):
    """Hand-written bang-bang controller for the stabilizer.

    Pushes the first state component back toward zero:
    2 = move towards negative, 1 = move towards positive, 0 = hold.
    """
    position = state[0]
    if position > 0.1:
        return 2  # too far positive -> move towards negative
    if position < -0.1:
        return 1  # too far negative -> move towards positive
    return 0  # inside the dead-band -> do not move
# Leftover experiment hooks for replaying a hand-written policy:
# storeMemory = ReplayMemory(100000)
# agent.perform_on_policy(100, customPolicy, storeMemory)
# storeMemory.write_to_text('performPolicyMemory.txt')
# transitions = storeMemory.fetch_all_random()
# Re-evaluate the trained policy/value on the same 100x100 grid and dump
# the results for comparison against the *BeforeTrain* files.
xSet = np.linspace(-4,4,100)
ySet = np.linspace(-4,4,100)
policyX = np.zeros((100, 100))
policyY = np.zeros((100, 100))
value = np.zeros((100, 100))
for i, x in enumerate(xSet):
    for j, y in enumerate(ySet):
        state = torch.tensor([x, y], dtype=torch.float32).unsqueeze(0)
        action = agent.actorNet.select_action(state, noiseFlag = False)
        value[i, j] = agent.criticNetOne.forward(state, action).item()
        action = action.detach().numpy()
        policyX[i, j] = action[0][0]
        policyY[i, j] = action[0][1]
np.savetxt('StabilizerPolicyXAfterTrain.txt', policyX, fmt='%f')
np.savetxt('StabilizerPolicyYAfterTrain.txt', policyY, fmt='%f')
np.savetxt('StabilizerValueAfterTrain.txt', value, fmt='%f')
| [
"yangyutu123@gmail.com"
] | yangyutu123@gmail.com |
cbd6b19a6bbb07e66bd88b1d00cc2d3c6b529b71 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/miniclient/contacts.py | f9c766ea0ee384ce251a4bb8eb9d6dff9264828d | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 676 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/miniclient/contacts.py
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.shared import events, g_eventBus, EVENT_BUS_SCOPE
from helpers import aop
class _CreateSquadAspect(aop.Aspect):
    """Aspect that cancels the intercepted call and shows the squad promo window."""

    def atCall(self, cd):
        # Suppress the original createSquad call entirely...
        cd.avoid()
        # ...and pop up the squad promo window in the lobby instead.
        event = events.LoadViewEvent(VIEW_ALIAS.SQUAD_PROMO_WINDOW)
        g_eventBus.handleEvent(event, scope=EVENT_BUS_SCOPE.LOBBY)
class CreateSquadPointcut(aop.Pointcut):
    # Hooks BaseUserCMHandler.createSquad so the aspect above runs instead of it.
    def __init__(self):
        aop.Pointcut.__init__(self, 'gui.Scaleform.daapi.view.lobby.user_cm_handlers', 'BaseUserCMHandler', 'createSquad', aspects=(_CreateSquadAspect,))
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
6e6d6cf4a65dc5d1d8f8ef1da4591bc0514da057 | f69a4729de938512da8016f79e4bc58aff192417 | /test6.py | 0daaf4e724b4f14856107c2a3b94a9b3592d8840 | [] | no_license | FreeFlyXiaoMa/personal_project | 98811995568f76bbef7d9b0d0f05bec94805f3f0 | ad15c417d656e12bfcac084eec476e7e76d78fa2 | refs/heads/master | 2023-02-11T05:11:56.310548 | 2021-01-07T14:15:33 | 2021-01-07T14:15:33 | 322,598,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/usr/bin/env python # -*- coding: utf-8 -*-
# @Author : Ma
# @Time : 2021/1/5 17:58
# @File : test6.py
class Apple():
    """Fruit product built by SimpleFactory; repr() is its Chinese display name."""
    def __repr__(self):
        return "苹果"
class Banana():
    """Fruit product built by SimpleFactory; repr() is its Chinese display name."""
    def __repr__(self):
        return "香蕉"
class SimpleFactory():
    """Simple factory: builds a fruit object from a one-letter code."""

    @staticmethod
    def get_gruit(name):
        # Dispatch table replaces the if/elif chain; an unknown code yields
        # None, matching the original implicit fall-through behaviour.
        fruit_cls = {'a': Apple, 'b': Banana}.get(name)
        return fruit_cls() if fruit_cls is not None else None
# Smoke test: build and print an Apple.
# NOTE(review): the name ``re`` shadows the stdlib ``re`` module.
re=SimpleFactory().get_gruit('a')
print(re)
"2429825530@qq.com"
] | 2429825530@qq.com |
83f7ea25ae04b9c66d3b24b1bc84f2e02c1bff5c | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/tempest/tempest/api/compute/admin/test_security_groups.py | 40ae236920705c20a3a73d65de2f31ea11e1dc63 | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | # Copyright 2013 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class SecurityGroupsTestAdminJSON(base.BaseV2ComputeAdminTest):
    """Admin-level security-group API tests (JSON interface)."""

    @classmethod
    def resource_setup(cls):
        # Keep both an admin and a regular client so the test can compare
        # what each tenant is allowed to see.
        super(SecurityGroupsTestAdminJSON, cls).resource_setup()
        cls.adm_client = cls.os_adm.security_groups_client
        cls.client = cls.security_groups_client

    def _delete_security_group(self, securitygroup_id, admin=True):
        # Cleanup helper: delete via the admin client unless admin=False.
        if admin:
            resp, _ = self.adm_client.delete_security_group(securitygroup_id)
        else:
            resp, _ = self.client.delete_security_group(securitygroup_id)
        self.assertEqual(202, resp.status)

    @testtools.skipIf(CONF.service_available.neutron,
                      "Skipped because neutron do not support all_tenants"
                      "search filter.")
    @test.attr(type='smoke')
    @test.services('network')
    def test_list_security_groups_list_all_tenants_filter(self):
        # Admin can list security groups of all tenants
        # List of all security groups created
        security_group_list = []
        # Create two security groups for a non-admin tenant
        for i in range(2):
            name = data_utils.rand_name('securitygroup-')
            description = data_utils.rand_name('description-')
            resp, securitygroup = (self.client
                                   .create_security_group(name, description))
            self.assertEqual(200, resp.status)
            self.addCleanup(self._delete_security_group,
                            securitygroup['id'], admin=False)
            security_group_list.append(securitygroup)
        # All non-admin groups share a tenant; remember it from the last one.
        client_tenant_id = securitygroup['tenant_id']
        # Create two security groups for admin tenant
        for i in range(2):
            name = data_utils.rand_name('securitygroup-')
            description = data_utils.rand_name('description-')
            resp, adm_securitygroup = (self.adm_client
                                       .create_security_group(name,
                                                              description))
            self.assertEqual(200, resp.status)
            self.addCleanup(self._delete_security_group,
                            adm_securitygroup['id'])
            security_group_list.append(adm_securitygroup)
        # Fetch all security groups based on 'all_tenants' search filter
        param = {'all_tenants': 'true'}
        resp, fetched_list = self.adm_client.list_security_groups(params=param)
        self.assertEqual(200, resp.status)
        sec_group_id_list = map(lambda sg: sg['id'], fetched_list)
        # Now check if all created Security Groups are present in fetched list
        for sec_group in security_group_list:
            self.assertIn(sec_group['id'], sec_group_id_list)
        # Fetch all security groups for non-admin user with 'all_tenants'
        # search filter
        resp, fetched_list = self.client.list_security_groups(params=param)
        self.assertEqual(200, resp.status)
        # Now check if all created Security Groups are present in fetched list
        for sec_group in fetched_list:
            self.assertEqual(sec_group['tenant_id'], client_tenant_id,
                             "Failed to get all security groups for "
                             "non admin user.")
_interface = 'xml'
| [
"cody@uky.edu"
] | cody@uky.edu |
3275b01b0757bc868e44091fbe6a3bc2654e7d97 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayOpenSearchboxDowngradePreconsultResponse.py | 01716209b4c570450aa7417705ea343616a00453 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,786 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AccessCheckInfo import AccessCheckInfo
from alipay.aop.api.domain.BrandCertInfo import BrandCertInfo
from alipay.aop.api.domain.SearchBoxAppInfo import SearchBoxAppInfo
class AlipayOpenSearchboxDowngradePreconsultResponse(AlipayResponse):
    """Response model for alipay.open.searchbox.downgrade.preconsult.

    Each property mirrors one key of the gateway response; object-typed
    setters accept either an already-built domain object or its raw dict
    form (coerced via ``from_alipay_dict``).
    """

    def __init__(self):
        super(AlipayOpenSearchboxDowngradePreconsultResponse, self).__init__()
        self._access_check_info = None
        self._applicable_box_type = None
        self._brand_cert_info = None
        self._opt_principal_id = None
        self._tiny_app_info = None

    @property
    def access_check_info(self):
        return self._access_check_info

    @access_check_info.setter
    def access_check_info(self, value):
        # Accept a ready AccessCheckInfo or coerce a raw dict into one.
        if isinstance(value, AccessCheckInfo):
            self._access_check_info = value
        else:
            self._access_check_info = AccessCheckInfo.from_alipay_dict(value)

    @property
    def applicable_box_type(self):
        return self._applicable_box_type

    @applicable_box_type.setter
    def applicable_box_type(self, value):
        self._applicable_box_type = value

    @property
    def brand_cert_info(self):
        return self._brand_cert_info

    @brand_cert_info.setter
    def brand_cert_info(self, value):
        # Accept a ready BrandCertInfo or coerce a raw dict into one.
        if isinstance(value, BrandCertInfo):
            self._brand_cert_info = value
        else:
            self._brand_cert_info = BrandCertInfo.from_alipay_dict(value)

    @property
    def opt_principal_id(self):
        return self._opt_principal_id

    @opt_principal_id.setter
    def opt_principal_id(self, value):
        self._opt_principal_id = value

    @property
    def tiny_app_info(self):
        return self._tiny_app_info

    @tiny_app_info.setter
    def tiny_app_info(self, value):
        # Accept a ready SearchBoxAppInfo or coerce a raw dict into one.
        if isinstance(value, SearchBoxAppInfo):
            self._tiny_app_info = value
        else:
            self._tiny_app_info = SearchBoxAppInfo.from_alipay_dict(value)

    def parse_response_content(self, response_content):
        # Populate only the keys actually present in the gateway payload.
        response = super(AlipayOpenSearchboxDowngradePreconsultResponse, self).parse_response_content(response_content)
        if 'access_check_info' in response:
            self.access_check_info = response['access_check_info']
        if 'applicable_box_type' in response:
            self.applicable_box_type = response['applicable_box_type']
        if 'brand_cert_info' in response:
            self.brand_cert_info = response['brand_cert_info']
        if 'opt_principal_id' in response:
            self.opt_principal_id = response['opt_principal_id']
        if 'tiny_app_info' in response:
            self.tiny_app_info = response['tiny_app_info']
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
8c1cde682e17c15befb669b1f03ab4f799f0ca35 | 00cb3a46cd318463d2e1d91af56e1d01afc6b983 | /reclab/metrics/setup.py | 9fddacf9eb9f33fca7cc09f0d32bee1c265a55fa | [
"MIT"
] | permissive | tgsmith61591/reclab | 5f5527d9d78a0ed9646011a2a1da5e42a3ad731e | 423c9b5dd6e00def548b5cc29d3eca4dcc8c4a88 | refs/heads/master | 2020-03-30T18:05:36.897435 | 2018-11-08T00:37:10 | 2018-11-08T00:37:10 | 151,483,108 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | # -*- coding: utf-8 -*-
import os
import numpy
from numpy.distutils.misc_util import Configuration
from reclab._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """Build the numpy.distutils configuration for the ``metrics`` subpackage.

    Links the Cython ranking extension against the detected CBLAS/BLAS
    implementation and registers the ``tests`` subpackage.
    """
    cblas_libs, blas_info = get_blas_info()

    # Use this rather than cblas_libs so we don't fail on Windows
    libraries = []
    if os.name == 'posix':
        cblas_libs.append('m')
        libraries.append('m')

    config = Configuration("metrics", parent_package, top_path)
    config.add_extension("_ranking_fast",
                         sources=["_ranking_fast.pyx"],
                         # BUG FIX: the BLAS include dirs were previously a
                         # nested list (a single list element inside
                         # include_dirs); flatten them so the compiler
                         # actually receives each -I path.
                         include_dirs=[numpy.get_include()] +
                                      blas_info.pop('include_dirs', []),
                         libraries=libraries,
                         extra_compile_args=blas_info.pop(
                             'extra_compile_args', []),
                         **blas_info)

    config.add_subpackage('tests')
    return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| [
"tgsmith61591@gmail.com"
] | tgsmith61591@gmail.com |
5c05020997aad86744f67798e0dc8daf37327b6d | 0794f4cf49de3b72fdff399b1911230a776ca75a | /tests/backends/conftest.py | 613f7b81a3e8bacaa6ff0f54f983c704d0585230 | [
"MIT"
] | permissive | newtonproject/newchain-lib-keys-py | f2240a4c14dfbc41716b2660935be51f6170ac7d | f4bd79b1022a047333cd791143544333aff85cf5 | refs/heads/master | 2022-09-24T05:28:28.030264 | 2022-09-18T14:29:11 | 2022-09-18T14:29:11 | 174,458,539 | 0 | 1 | MIT | 2022-09-18T14:29:12 | 2019-03-08T02:56:44 | Python | UTF-8 | Python | false | false | 3,842 | py | import pytest
from eth_utils import (
decode_hex,
keccak,
)
MSG = b'message'
MSGHASH = keccak(MSG)
# This is a sample of signatures generated with a known-good implementation of the ECDSA
# algorithm, which we use to test our ECC backends. If necessary, it can be generated from scratch
# with the following code:
"""
from devp2p import crypto
from eth_utils import encode_hex
msg = b'message'
msghash = crypto.sha3(b'message')
for secret in ['alice', 'bob', 'eve']:
print("'{}': dict(".format(secret))
privkey = crypto.mk_privkey(secret)
pubkey = crypto.privtopub(privkey)
print(" privkey='{}',".format(encode_hex(privkey)))
print(" pubkey='{}',".format(encode_hex(crypto.privtopub(privkey))))
ecc = crypto.ECCx(raw_privkey=privkey)
sig = ecc.sign(msghash)
print(" sig='{}',".format(encode_hex(sig)))
print(" raw_sig='{}')".format(crypto._decode_sig(sig)))
assert crypto.ecdsa_recover(msghash, sig) == pubkey
"""
# Compressed public keys have been calculated with coincurve
SECRETS = {
"alice": dict(
privkey=decode_hex('0x9c0257114eb9399a2985f8e75dad7600c5d89fe3824ffa99ec1c3eb8bf3b0501'),
pubkey=decode_hex('0x5eed5fa3a67696c334762bb4823e585e2ee579aba3558d9955296d6c04541b426078dbd48d74af1fd0c72aa1a05147cf17be6b60bdbed6ba19b08ec28445b0ca'), # noqa: E501
compressed_pubkey=decode_hex('0x025eed5fa3a67696c334762bb4823e585e2ee579aba3558d9955296d6c04541b42'), # noqa: E501
sig=decode_hex('0xb20e2ea5d3cbaa83c1e0372f110cf12535648613b479b64c1a8c1a20c5021f380434d07ec5795e3f789794351658e80b7faf47a46328f41e019d7b853745cdfd01'), # noqa: E501
raw_sig=(
1,
80536744857756143861726945576089915884233437828013729338039544043241440681784,
1902566422691403459035240420865094128779958320521066670269403689808757640701,
)
),
"bob": dict(
privkey=decode_hex('0x38e47a7b719dce63662aeaf43440326f551b8a7ee198cee35cb5d517f2d296a2'),
pubkey=decode_hex('0x347746ccb908e583927285fa4bd202f08e2f82f09c920233d89c47c79e48f937d049130e3d1c14cf7b21afefc057f71da73dec8e8ff74ff47dc6a574ccd5d570'), # noqa: E501
compressed_pubkey=decode_hex('0x02347746ccb908e583927285fa4bd202f08e2f82f09c920233d89c47c79e48f937'), # noqa: E501
sig=decode_hex('0x5c48ea4f0f2257fa23bd25e6fcb0b75bbe2ff9bbda0167118dab2bb6e31ba76e691dbdaf2a231fc9958cd8edd99507121f8184042e075cf10f98ba88abff1f3601'), # noqa: E501
raw_sig=(
1,
41741612198399299636429810387160790514780876799439767175315078161978521003886,
47545396818609319588074484786899049290652725314938191835667190243225814114102,
),
),
"eve": dict(
privkey=decode_hex('0x876be0999ed9b7fc26f1b270903ef7b0c35291f89407903270fea611c85f515c'),
pubkey=decode_hex('0xc06641f0d04f64dba13eac9e52999f2d10a1ff0ca68975716b6583dee0318d91e7c2aed363ed22edeba2215b03f6237184833fd7d4ad65f75c2c1d5ea0abecc0'), # noqa: E501
compressed_pubkey=decode_hex('0x02c06641f0d04f64dba13eac9e52999f2d10a1ff0ca68975716b6583dee0318d91'), # noqa: E501
sig=decode_hex('0xbabeefc5082d3ca2e0bc80532ab38f9cfb196fb9977401b2f6a98061f15ed603603d0af084bf906b2cdf6cdde8b2e1c3e51a41af5e9adec7f3643b3f1aa2aadf00'), # noqa: E501
raw_sig=(
0,
84467545608142925331782333363288012579669270632210954476013542647119929595395,
43529886636775750164425297556346136250671451061152161143648812009114516499167,
),
),
}
@pytest.fixture(params=['alice', 'bob', 'eve'])
def key_fixture(request):
    """Parametrised fixture yielding one known-good key/signature record.

    The previous if/elif chain duplicated the param list (plus an
    unreachable assert); indexing SECRETS directly means adding a new
    identity only requires touching ``params`` and the SECRETS dict.
    """
    return SECRETS[request.param]
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
51303d9fe9a121fe0c3a0c7c1cea24cbde19d03f | cca3f6a0accb18760bb134558fea98bb87a74806 | /abc170/A/main.py | 24bdf5522f64ecbc21cfe988cd81c6b5c5b857cd | [] | no_license | Hashizu/atcoder_work | 5ec48cc1147535f8b9d0f0455fd110536d9f27ea | cda1d9ac0fcd56697ee5db93d26602dd8ccee9df | refs/heads/master | 2023-07-15T02:22:31.995451 | 2021-09-03T12:10:57 | 2021-09-03T12:10:57 | 382,987,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | #!/usr/bin/env python3
import sys
def solve(x: "List[int]"):
    """Print the 1-based position of the first 0 in *x*."""
    for position, value in enumerate(x, start=1):
        if value == 0:
            print(position)
            return
    return
# Generated by 1.1.7.1 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    # Lazily stream whitespace-separated tokens from stdin.
    def iterate_tokens():
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    # The problem supplies exactly five integers; the answer is the
    # 1-based index of the 0 among them.
    x = [int(next(tokens)) for _ in range(5)]  # type: "List[int]"
    solve(x)
if __name__ == '__main__':
main()
| [
"athenenoctus@gmail.com"
] | athenenoctus@gmail.com |
c888e004adcc49fd95d80b8acd22bdc3453d154d | b0ed67c452f79da72120b185960bf06711695fdd | /for_loop/clever_Lily.py | e74892cb560057dbd786a3aa86f11a5bb257b476 | [] | no_license | NikiDimov/SoftUni-Python-Basics | c0e1ae37867c1cfa264f8a19fdfba68b349df5d3 | f02045578930d03edbbd073995867eabfb171bbc | refs/heads/main | 2023-07-11T01:10:25.612754 | 2021-08-19T10:26:12 | 2021-08-19T10:26:12 | 345,221,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | age = int(input())
washing_machine_price = float(input())
toy_price = int(input())
def safe_money(age, toy_price):
    """Total money saved by Lily's *age*-th birthday.

    On the n-th even birthday she receives 10*n, of which 1 is taken
    (hence 10*n - 1); on every odd birthday she gets a toy worth
    *toy_price* instead.  Computed in closed form rather than a loop.
    """
    even_birthdays = age // 2
    odd_birthdays = age - even_birthdays
    # sum of (10*1 - 1) + (10*2 - 1) + ... + (10*n - 1)
    cash = 10 * even_birthdays * (even_birthdays + 1) // 2 - even_birthdays
    return cash + toy_price * odd_birthdays
money = safe_money(age, toy_price)
def output(money, washing_machine_price):
    """Format whether the savings cover the washing machine.

    Returns "Yes! <leftover>" when affordable, else "No! <shortfall>",
    both amounts with two decimals.
    """
    difference = money - washing_machine_price
    if difference >= 0:
        return f"Yes! {difference:.2f}"
    return f"No! {-difference:.2f}"
print(output(money, washing_machine_price))
| [
"niki.dimov86@gmail.com"
] | niki.dimov86@gmail.com |
f3a4069409b3db8256545974736d20c43b9938ef | ad0857eaba945c75e705594a53c40dbdd40467fe | /baekjoon/python/grade.py | f40701442fb4c7e58ba77ac14c5da99841f35bff | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 290 | py | # Grade
# https://www.acmicpc.net/problem/9498

# Map a 0-100 exam score onto an A-F letter grade and print it.
score = int(input())

if score >= 90:
    letter = 'A'
elif score >= 80:
    letter = 'B'
elif score >= 70:
    letter = 'C'
elif score >= 60:
    letter = 'D'
else:
    letter = 'F'
print(letter)
| [
"yongsung.kang@gmail.com"
] | yongsung.kang@gmail.com |
6814364d59dcaf12920348c9cbd566cb3f9268ed | 4e958672a47409ebafd41704a30de781450f60e5 | /NtForm5/enroll/models.py | f2f516e23579dba98e555a63879ff7721059ab94 | [] | no_license | Nitesh-Singh-5/Understanding-Django-from-scratch | 7f704a2c0101d1e4a535c61a447a4e3775539b52 | b941151bdc2bee555b590f90e4a4c17e2a83db96 | refs/heads/master | 2022-12-28T20:23:03.042537 | 2020-10-13T15:10:55 | 2020-10-13T15:10:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from django.db import models
# Create your models here.
class User(models.Model):
    # Simple enrollment record captured from the registration form.
    name=models.CharField(max_length=50)
    # NOTE(review): the password is stored as a plain CharField, i.e. in
    # clear text -- consider hashing (django.contrib.auth) unless this is
    # deliberately simplified for the tutorial; confirm.
    password=models.CharField( max_length=50)
    email=models.EmailField(max_length=254)
"67116285+nitesh5801@users.noreply.github.com"
] | 67116285+nitesh5801@users.noreply.github.com |
7babd85454a8e2bec58fc5926ec2482505fb44fb | ec61b57a99d7683a668f4910c9fad4b1c9335525 | /finished/53-maximumSubarray.py | e78beb987e0265416cb1db0da125c666ff430f35 | [] | no_license | savadev/leetcode-2 | 906f467e793b9636965ab340c7753d9fc15bc70a | 20f37130236cc68224082ef056dacd6accb374e3 | refs/heads/master | 2020-12-28T12:54:08.355317 | 2018-08-06T00:44:24 | 2018-08-06T00:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if all([x < 0 for x in nums]):
return max(nums)
l = len(nums)
b = [0] * l
for i in range(l):
val = nums[i]
if i == 0:
if val > 0:
b[i] = val
else:
opt = b[i-1]
if opt + val > 0:
b[i] = opt + val
MAX = max(b)
argmax = b.index(MAX)
for idx in range(argmax, -1,-1):
val = b[idx]
if val <= 0:
idx += 1
break
array = nums[idx:argmax + 1]
return sum(array)
# Smoke test against the classic example; expected output is 6.
r = Solution()
res = r.maxSubArray([-2,1,-3,4,-1,2,1,-5,4])
print(res)
| [
"nmaswood@cs.uchicago.edu"
] | nmaswood@cs.uchicago.edu |
aa601f15398d031e2284e689bdb89ab4ad82d09d | a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426 | /scripts/qc/check_afos_sources.py | 521ec97bc54d4c2cc9aea5845e64ed0946fccb1c | [
"MIT"
] | permissive | ragesah/iem | 1513929c8bc7f254048271d61b4c4cf27a5731d7 | 8ed970d426bddeaa3e7ded593665d22f0f9f6e87 | refs/heads/main | 2023-08-20T20:01:15.480833 | 2021-10-12T15:44:52 | 2021-10-12T15:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | """
Look at the sources saved to the AFOS database and then whine about
sources we do not understand!
"""
import sys
import datetime
import pytz
from pyiem.network import Table as NetworkTable
from pyiem.util import get_dbconn, utc, logger
LOG = logger()
pgconn = get_dbconn("afos")
cursor = pgconn.cursor()
cursor2 = pgconn.cursor()
nt = NetworkTable(["WFO", "RFC", "NWS", "NCEP", "CWSU", "WSO"])
def sample(source, ts):
    """Print out something to look at"""
    # Pull one day's worth of products for this source (via the module-level
    # cursor2) and print an IEM product URI for the first occurrence of each
    # PIL, so a human can eyeball what this unknown source is emitting.
    cursor2.execute(
        "SELECT pil, entered, wmo from products where entered >= %s "
        "and entered < %s and source = %s",
        (ts, ts + datetime.timedelta(hours=24), source),
    )
    pils = []
    for row in cursor2:
        if row[0] in pils:
            continue  # only show each PIL once
        pils.append(row[0])
        valid = row[1].astimezone(pytz.UTC)
        uri = (
            "https://mesonet.agron.iastate.edu/p.php?pid=%s-%s-%s-%s" ""
        ) % (valid.strftime("%Y%m%d%H%M"), source, row[2], row[0])
        print(" %s" % (uri,))
def look4(ts):
    """Let us investigate"""
    # Count products per source for the 24 hours starting at ts, then whine
    # about K*/P* sources whose site identifier is not in the known networks.
    cursor.execute(
        "SELECT source, count(*) from products WHERE entered >= %s "
        "and entered < %s and source is not null "
        "GROUP by source ORDER by count DESC",
        (ts, ts + datetime.timedelta(hours=24)),
    )
    for row in cursor:
        source = row[0]
        # Strip a leading K to get the 3-letter site id used by the tables.
        lookup = source[1:] if source[0] == "K" else source
        if lookup not in nt.sts and source[0] in ["K", "P"]:
            print("%s %s" % (row[0], row[1]))
            sample(source, ts)
def main(argv):
    """Go Main Go"""
    if len(argv) == 4:
        # Explicit date given on the command line as: year month day
        ts = utc(int(argv[1]), int(argv[2]), int(argv[3]))
    else:
        # Default to yesterday.
        ts = utc() - datetime.timedelta(days=1)
    # Normalize to midnight UTC (both branches) so a full day is scanned.
    ts = ts.replace(hour=0, minute=0, second=0, microsecond=0)
    LOG.debug("running for %s", ts)
    look4(ts)
if __name__ == "__main__":
main(sys.argv)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
ca35abf6daa26f72594d90c4d13e76b76739ac1f | f6703b2afca284bf75e0dbf8f61d77e5251f905c | /euler445.py | 0e3f9ec0bbd0c78fee38e6915e183da0d10c8d7c | [] | no_license | rwieckowski/project-euler-python | 2a7aa73670b4684f076ad819bfc464aa0778f96c | be9a455058b20adfd32c814effd8753cc9d39890 | refs/heads/master | 2021-01-10T21:10:44.875335 | 2015-06-23T13:29:58 | 2015-06-23T13:29:58 | 37,920,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """
<P>For every integer ngt1 the family of functions f<sub>nab</sub> is
defined by f<sub>nab</sub><var>x</var>equiva<var>x</var>b mod n for ab
<var>x</var> integer and 0ltaltn 0lebltn 0le<var>x</var>ltn<BR />We
will call f<sub>nab</sub> a <i>retraction</i> if f<sub>nab</sub>f<sub>
nab</sub><var>x</var>equivf<sub>nab</sub><var>x</var> mod n for every
0le<var>x</var>ltn<BR />Let Rn be the number of retractions for n</P>
<P>You are given that<BR />sum Rc for cC100 000k and 1 le k le99 999
equiv628701600 mod 1 000 000 007<BR />Cnk is the binomial coefficient
<BR /></P><P> Find sum Rc for cC10 000 000k and 1 lekle 9 999 999
<BR />Give your answer modulo 1 000 000 007</P>
"""
def euler445():
    """Placeholder for Project Euler problem 445 (not solved yet).

    >>> euler445()
    'to-do'
    """
    # Return the marker instead of falling through with ``pass``: the
    # previous body returned None, so the doctest above could never pass.
    return 'to-do'
if __name__ == "__main__":
import doctest
doctest.testmod() | [
"rwieckowski@ivmx.pl"
] | rwieckowski@ivmx.pl |
5048abd346979c31fa2bad6f1ed859ca05094696 | 1cc8604dff9713d3879599f1876a6ea313ebe1fb | /pysc2/lib/memoize.py | 91894b0ec613e49bd815ba54d02e3192b38d5258 | [
"Apache-2.0"
] | permissive | SoyGema/pysc2 | c363ec768ebf94e7b0fa08e136b36b7432ae1b44 | e5de62023ec45ac212016b5404dd73272109d9d4 | refs/heads/master | 2022-02-08T21:41:46.530129 | 2022-01-29T13:11:15 | 2022-01-29T13:11:15 | 143,897,552 | 1 | 0 | Apache-2.0 | 2018-08-07T16:06:23 | 2018-08-07T16:06:22 | null | UTF-8 | Python | false | false | 1,095 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A memoization decorator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def memoize(func):
  """Memoization decorator."""

  class Memodict(dict):
    """Dict subclass that computes-and-caches *func* results on first lookup."""

    __slots__ = ()
    __name__ = func.__name__
    __doc__ = func.__doc__

    def __call__(self, *args):
      # Calling the memoized function is just a dict lookup on the args tuple.
      return self[args]

    def __missing__(self, key):
      # Cache miss: compute once, remember forever.
      value = self[key] = func(*key)
      return value

  return Memodict()
| [
"tewalds@google.com"
] | tewalds@google.com |
a5fb2a518a351640365f63b92e44e08cceae715b | 22a68f6efd44bb8b366669f73b0657a232296665 | /cfme/tests/control/test_bugs.py | fcd7c30ee233a2ef11e15ec79cdf77bf97142ae9 | [] | no_license | richardfontana/cfme_tests | e33261a809d229b515aac2c9ee730a88da9b2510 | 73b3e1b0717b5cb0449157dc5d85e89820b62c57 | refs/heads/master | 2021-01-15T19:24:11.424107 | 2015-09-14T19:00:11 | 2015-09-14T19:00:11 | 42,489,462 | 0 | 0 | null | 2015-09-15T02:16:33 | 2015-09-15T02:16:33 | null | UTF-8 | Python | false | false | 6,226 | py | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.control.explorer import PolicyProfile, VMCompliancePolicy, Action, VMControlPolicy
from cfme.infrastructure.virtual_machines import (assign_policy_profiles, get_first_vm_title,
unassign_policy_profiles, Vm)
from utils.log import logger
from utils.providers import setup_a_provider as _setup_a_provider
from utils.wait import wait_for
@pytest.fixture(scope="module")
def setup_a_provider():
    # Any infrastructure provider is sufficient for these tests.
    return _setup_a_provider("infra")
@pytest.fixture(scope="module")
def vmware_provider():
    # The folder-scope test below relies on VMware's "VMs & Templates" folders.
    return _setup_a_provider("infra", "virtualcenter")
@pytest.fixture(scope="module")
def vmware_vm(request, vmware_provider):
    # Provision a throwaway VM on the VMware provider; torn down after the module.
    vm = Vm("test_control_{}".format(fauxfactory.gen_alpha().lower()), vmware_provider)
    vm.create_on_provider(find_in_cfme=True)
    request.addfinalizer(vm.delete_from_provider)
    return vm
@pytest.mark.meta(blockers=[1155284])
@pytest.mark.ignore_stream("5.2")
def test_scope_windows_registry_stuck(request, setup_a_provider):
    """If you provide Scope checking windows registry, it messes CFME up. Recoverable."""
    # Finalizers are registered *before* create() so cleanup still runs if
    # creation half-succeeds; each checks .exists to stay idempotent.
    policy = VMCompliancePolicy(
        "Windows registry scope glitch testing Compliance Policy",
        active=True,
        scope=r"fill_registry(HKLM\SOFTWARE\Microsoft\CurrentVersion\Uninstall\test, "
        r"some value, INCLUDES, some content)"
    )
    request.addfinalizer(lambda: policy.delete() if policy.exists else None)
    policy.create()
    profile = PolicyProfile(
        "Windows registry scope glitch testing Compliance Policy",
        policies=[policy]
    )
    request.addfinalizer(lambda: profile.delete() if profile.exists else None)
    profile.create()
    # Now assign this malformed profile to a VM
    vm = get_first_vm_title()
    assign_policy_profiles(vm, profile.description, via_details=True)
    # It should be screwed here, but do additional check
    pytest.sel.force_navigate("dashboard")
    pytest.sel.force_navigate("infrastructure_virtual_machines")
    assert "except" not in pytest.sel.title().lower()
    unassign_policy_profiles(vm, profile.description, via_details=True)
@pytest.mark.meta(blockers=[1209538], automates=[1209538])
def test_folder_field_scope(request, vmware_provider, vmware_vm):
    """This test tests the bug that makes the folder filter in expression not work.

    Prerequisities:
        * A VMware provider.
        * A VM on the provider.
        * A tag to assign.

    Steps:
        * Read the VM's 'Parent Folder Path (VMs & Templates)' from its summary page.
        * Create an action for assigning the tag to the VM.
        * Create a policy, for scope use ``Field``, field name
          ``VM and Instance : Parent Folder Path (VMs & Templates)``, ``INCLUDES`` and the
          folder name as stated on the VM's summary page.
        * Assign the ``VM Discovery`` event to the policy.
        * Assign the action to the ``VM Discovery`` event.
        * Create a policy profile and assign the policy to it.
        * Assign the policy profile to the provider.
        * Delete the VM from the CFME database.
        * Initiate provider refresh and wait for VM to appear again.
        * Assert that the VM gets tagged by the tag.
    """
    # Retrieve folder location
    folder = None
    tags = vmware_vm.get_tags()
    for tag in tags:
        if "Parent Folder Path (VMs & Templates)" in tag:
            folder = tag.split(":", 1)[-1].strip()
            logger.info("Detected folder: {}".format(folder))
            break
    else:
        # for/else: only reached when the loop did not break,
        # i.e. no folder tag was found on the VM.
        pytest.fail("Could not read the folder from the tags:\n{}".format(repr(tags)))

    # Create Control stuff
    action = Action(
        fauxfactory.gen_alpha(),
        "Tag", dict(tag=("My Company Tags", "Service Level", "Platinum")))
    action.create()
    request.addfinalizer(action.delete)
    policy = VMControlPolicy(
        fauxfactory.gen_alpha(),
        scope=(
            "fill_field(VM and Instance : Parent Folder Path (VMs & Templates), "
            "INCLUDES, {})".format(folder)))
    policy.create()
    request.addfinalizer(policy.delete)
    policy.assign_events("VM Discovery")
    request.addfinalizer(policy.assign_events)  # Calling with no args unassigns all events
    policy.assign_actions_to_event("VM Discovery", action)
    profile = PolicyProfile(fauxfactory.gen_alpha(), policies=[policy])
    profile.create()
    request.addfinalizer(profile.delete)
    # Assign policy profile to the provider
    vmware_provider.assign_policy_profiles(profile.description)
    request.addfinalizer(lambda: vmware_provider.unassign_policy_profiles(profile.description))
    # Delete and rediscover the VM
    vmware_vm.remove_from_cfme()
    vmware_vm.wait_for_delete()
    vmware_provider.refresh_provider_relationships()
    vmware_vm.wait_to_appear()
    # Wait for the tag to appear
    wait_for(
        vmware_vm.get_tags, num_sec=600, delay=15,
        fail_condition=lambda tags: "Service Level: Platinum" not in tags, message="vm be tagged")
@pytest.mark.meta(blockers=[1243357], automates=[1243357])
def test_invoke_custom_automation(request):
    """This test tests a bug that caused the ``Invoke Custom Automation`` fields to disappear.

    Steps:
        * Go create new action, select Invoke Custom Automation
        * The form with additional fields should appear
    """
    # The action is to have all possible fields filled, that way we can ensure it is good
    action = Action(
        fauxfactory.gen_alpha(),
        "Invoke a Custom Automation",
        dict(
            message=fauxfactory.gen_alpha(),
            request=fauxfactory.gen_alpha(),
            attribute_1=fauxfactory.gen_alpha(),
            value_1=fauxfactory.gen_alpha(),
            attribute_2=fauxfactory.gen_alpha(),
            value_2=fauxfactory.gen_alpha(),
            attribute_3=fauxfactory.gen_alpha(),
            value_3=fauxfactory.gen_alpha(),
            attribute_4=fauxfactory.gen_alpha(),
            value_4=fauxfactory.gen_alpha(),
            attribute_5=fauxfactory.gen_alpha(),
            value_5=fauxfactory.gen_alpha(),))

    # Registered before create() so the action is removed even if
    # creation fails after the record exists.
    @request.addfinalizer
    def _delete_action():
        if action.exists:
            action.delete()

    action.create()
| [
"mfalesni@redhat.com"
] | mfalesni@redhat.com |
a86582827ffca20f3f6c23a1a6becdb5c8793c40 | 5a6e91490dafa97f0c52a63eff5a39dd16b7122e | /pythontut13.py | 1e38abf421057b0a64475618d4bcd4b572867d4c | [] | no_license | suvimanikandan/PYTHON_PROGRAMS | 968884292ceef502b3f3b6acdea79ce192e933f8 | f4be986232bf1adef05837d67249840965fa50fc | refs/heads/main | 2023-07-15T19:15:06.567198 | 2021-08-24T14:30:08 | 2021-08-24T14:30:08 | 399,495,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | # VIDEO 13 : Dictionaries
# While lists organize data based on sequential indexes Dictionaries instead use key / value pairs
# A key / value pair could be fName : "Derek" where fName is the key and "Derek" is the value
# Here is some code to help this make sense
# Create a Dictionary about me
swe_dict = {"f_name": "Swetha", "l_name": "Manikandan", "address": "123 Main St"}
# Get a value with the key
print("May name :", swe_dict["f_name"])
# Change a value with the key
swe_dict["address"] = "215 North St"
# NOTE: since Python 3.7 dicts preserve insertion order; on older
# versions the print order was arbitrary.
print(swe_dict)
# Add a new key value
swe_dict['city'] = 'Pittsburgh'
# Check if a key exists
print("Is there a city :", "city" in swe_dict)
# Get the list of values (a view object in Python 3)
print(swe_dict.values())
# Get the list of keys (a view object in Python 3)
print(swe_dict.keys())
# Get the key and value with items()
for k, v in swe_dict.items():
    print(k, v)
# get() returns the value associated with a key, or the default if absent
print(swe_dict.get("m_name", "Not Here"))
# Delete a key value
del swe_dict["f_name"]
# Loop through the dictionary keys
for i in swe_dict:
    print(i)
# Delete all entries
swe_dict.clear()
# List for holding Dictionaries
employees = []
# Input employee data (interactive: expects "First Last" on one line)
f_name, l_name = input("Enter Employee Name : ").split()
employees.append({'f_name': f_name, 'l_name': l_name})
print(employees)
'''
Python Problem for you to Solve
Create an array of customer dictionaries and the output should look like this :
Enter Customer (Yes/No) : y
Enter Customer Name : Derek Banas
Enter Customer (Yes/No) : y
Enter Customer Name : Sally Smith
Enter Customer (Yes/No) : n
Derek Banas
Sally Smith
'''
# Create customer array outside the for so it isn't local
# to the while loop
customers = []
while True:
    # Cut off the 1st letter to cover if the user
    # types a n or y
    create_entry = input("Enter Customer (Yes/No) : ")
    create_entry = create_entry[0].lower()
    if create_entry == "n":
        # Leave the while loop when n is entered
        break
    else:
        # Get the customer name by splitting at the space
        f_name, l_name = input("Enter Customer Name : ").split()
        # Add the dictionary to the array
        customers.append({'f_name': f_name, 'l_name': l_name})
# Print out customer list
for cust in customers:
    print(cust['f_name'], cust['l_name'])
| [
"noreply@github.com"
] | suvimanikandan.noreply@github.com |
c92e9dd202a1022d5f5b9724a6a5acc6762c5135 | cb583e6d4b79eca522cd6365f1ef60d2828a941c | /ambry/cli/source_run/zero_deps.py | 794ec38b19ea01ddf05db24b0f6ee6f9851b1f26 | [
"BSD-2-Clause"
] | permissive | mihailignatenko/ambry | c93e8f8d933e700b75530a32f584267cb1e937fa | faea74eee1449896e64d1945f3e814936fe9c276 | refs/heads/master | 2021-01-21T15:58:15.791663 | 2015-05-03T04:53:58 | 2015-05-03T04:53:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | """Script for source run to print the umber of dependencies that a source
bundle has."""
def run(args, bundle_dir, bundle):
    """Print the bundle's fully-qualified name if it has zero dependencies.

    Used by ``source run`` to list dependency-free bundles.

    Args:
        args: parsed command-line arguments (unused).
        bundle_dir: directory the bundle lives in (unused).
        bundle: bundle object exposing ``metadata.dependencies`` and
            ``identity.fqname``.
    """
    deps = bundle.metadata.dependencies
    if len(deps) == 0:
        # print(x) with a single argument behaves identically under
        # Python 2 and 3; the original used the Python-2-only statement form.
        print(bundle.identity.fqname)
| [
"eric@clarinova.com"
] | eric@clarinova.com |
e431c4385feb041bc34a7352ac22ced4868b80a1 | aced407b41f6669f69e9eb8bd599260d50c0bd3f | /server/libs/top/api/rest/UmpToolUpdateRequest.py | 715305dddaa4f6beac8ea342190c26bf28a50f2c | [] | no_license | alswl/music_sofa | 42f7d15431f11b97bf67b604cfde0a0e9e3860cc | c4e5425ef6c80c3e57c91ba568f7cbfe63faa378 | refs/heads/master | 2016-09-12T18:37:34.357510 | 2016-05-20T11:49:52 | 2016-05-20T11:49:52 | 58,946,171 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | '''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class UmpToolUpdateRequest(RestApi):
	"""Auto-generated binding for the Taobao TOP "taobao.ump.tool.update" call.

	Set ``content`` and ``tool_id`` before executing the request through
	the RestApi base class.
	"""
	def __init__(self,domain='gw.api.taobao.com',port=80):
		RestApi.__init__(self,domain, port)
		# Request parameters, filled in by the caller before submission.
		self.content = None
		self.tool_id = None

	def getapiname(self):
		# API method name used by the RestApi base when dispatching.
		return 'taobao.ump.tool.update'
| [
"alswlx@gmail.com"
] | alswlx@gmail.com |
b950977a9d69e7c25f6b702fb9b06ce423d9a44f | 5ede2fe8a1e2b9d468a2151180b39e0d0f1b0e4e | /PyML/.venv/py3/bin/jupyter-migrate | 056bf6e95f435c37e0c606da7053813cf6d5012b | [] | no_license | rupeshsm/PyCode | a4b1b19f1fce1ac8354a7dd31c23cc44416010e8 | 67b47e71802bbe109a65cc373ae01f662c32d228 | refs/heads/master | 2021-01-23T22:06:29.601993 | 2017-02-17T18:19:01 | 2017-02-17T18:19:01 | 83,118,069 | 1 | 0 | null | 2017-02-25T08:29:11 | 2017-02-25T08:29:11 | null | UTF-8 | Python | false | false | 259 | #!/home/achilles/dev/PyCode/PyML/.venv/py3/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.migrate import main
# Console-script entry point (setuptools-generated): strip any
# "-script.py"/".exe" wrapper suffix from argv[0], then run
# jupyter_core's migrate main() and exit with its status code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"achillesrasquinha@gmail.com"
] | achillesrasquinha@gmail.com | |
5ddc90304d10e80c46e9195d779dd4bc9411340d | 69930e647aca06cf865f958e952941a410cad46b | /usuario/models.py | 1615cf667ccf7cf125b43604b57749e83a952bae | [] | no_license | GustavoGarciaPereira/trabalho_babrica_de_moveis | 9ce32a76eb4c9c5fed8f1482aa8ce6814b350a96 | d61b6f3ea09b1b2f0c3781cbc67e8e4ed11f7949 | refs/heads/master | 2023-08-14T03:50:48.738174 | 2020-07-03T11:42:21 | 2020-07-03T11:42:21 | 276,883,600 | 0 | 0 | null | 2021-09-22T19:22:24 | 2020-07-03T11:27:51 | Python | UTF-8 | Python | false | false | 1,637 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class Pessoa(models.Model):
    """Person record (client or employee) with contact and address data."""
    # NOTE(review): passing name="..." sets Django's Field.name (the column
    # name), not the human-readable label -- presumably verbose_name was
    # intended; changing it now would require a migration, so it is only
    # documented here.
    nome = models.CharField(name="nome",max_length=30)
    tipo = models.CharField(name="tipo",max_length=30)
    email = models.EmailField()
    # NOTE(review): the password appears to be stored in plain text here --
    # consider Django's auth framework / hashed storage.
    senha = models.CharField(name="senha",max_length=30)
    # NOTE(review): an integer CPF drops leading zeros -- confirm intended.
    cpf = models.IntegerField()
    data_nascimento = models.DateField()
    # Address fields.
    rua = models.CharField(name="rua",max_length=30)
    numero = models.IntegerField()
    cep = models.IntegerField()
    bairro = models.CharField(name="bairro",max_length=30)
    cidade = models.CharField(name="cidade",max_length=30)
    numero_telefome = models.CharField(name="numero_telefome",max_length=30)
    descricao = models.CharField(name="descricao",max_length=30)

    @property
    def get_detalhes_url(self):
        """URL of this person's detail page."""
        return reverse('detalhes-cliente', kwargs={'pk': self.pk})

    def __str__(self):
        return "{}".format(self.nome)
class Projeto(models.Model):
    """Furniture project: people involved, material, cost and an optional file."""
    responsavel = models.CharField(name="responsavel",max_length=30)
    colaborador = models.CharField(name="colaborador",max_length=30)
    cliente = models.CharField(name="cliente",max_length=30)
    titulo = models.CharField(name="titulo",max_length=30)
    material = models.CharField(name="material",max_length=30)
    quantidade = models.IntegerField()
    valor = models.FloatField()
    # Optional uploaded file, stored under MEDIA_ROOT/projeto/.
    arquivo = models.FileField(upload_to='projeto/', null=True,blank=True)
    # Presumably False while the project is open, True once done -- confirm.
    status = models.BooleanField(name="status",default=False)

    @property
    def get_absolute_url(self):
        """URL of the edit page for this project."""
        return reverse('update-projeto', kwargs={'pk': self.pk})
| [
"gusgurtavo@gmail.com"
] | gusgurtavo@gmail.com |
a5daedad2cfef500c84634a501ae8d22bba63646 | 5af97bffa345fee6a0a26cc8e24756b41f83c66c | /actors/world_related/connectors/__init__.py | 1b9293028b76db28cfa5761a24ab2b1825cb8692 | [] | no_license | Myshj/tdoa_multisensor_model | 26620bb687503c6b40ad886a0d666c10a34025d8 | eb2dedd9e3c3918c17177ce9d63355e10d4649dd | refs/heads/master | 2020-07-30T15:39:32.162225 | 2017-03-08T12:01:04 | 2017-03-08T12:01:04 | 73,627,161 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from .Base import Base
from .SensorSupervisorToTDOAGroupSupervisorConnector import SensorSupervisorToTDOAGroupSupervisorConnector
from .SoundSourceToPropagatorConnector import SoundSourceToPropagatorConnector
| [
"donmarin@mail.ru"
] | donmarin@mail.ru |
c1ec71577f4a90958a796d2a31c5c7c8a20ff20d | e18a48bab6bba2bcc7d8b7846f5921f76e2eed85 | /51. N-Queens.py | f0d2fa5aa2eef63a990cbc7f1de8bc8817fd8d0c | [] | no_license | shivasupraj/LeetCode | 384a2c5a857b1ccb952f8410675c5500e630a766 | 834de770fb2a60dea468238611fca9389e3a1e96 | refs/heads/master | 2021-06-27T11:49:16.452450 | 2019-06-05T03:32:44 | 2019-06-05T03:32:44 | 113,106,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | class Solution(object):
def solveNQueens(self, n):
"""
:type n: int
:rtype: List[List[str]]
"""
def solveNQueensHelper(matrix, index):
#print(matrix, index)
if index == -1:
strs = getStrsFromMatrix(matrix)
res.append(strs)
return False
for i in range(len(matrix) - 1, -1, -1):
#print(matrix, index, i)
if validQueenPosition(matrix, index, i):
#print(matrix, index, i, True)
matrix[index][i] = 'Q'
if solveNQueensHelper(matrix, index - 1):
pass
else:
matrix[index][i] = '.'
return False
def validQueenPosition(matrix, row, col):
s_i = 0; e_i = len(matrix) - 1
s_j = 0; e_j = len(matrix) - 1
#print(row, col, 1)
for j in range(len(matrix)):
if matrix[row][j] == 'Q':
return False
for i in range(len(matrix)):
#print(i, col)
if matrix[i][col] == 'Q':
return False
i = row; j = col
while i >= 0 and j >= 0:
if matrix[i][j] == 'Q':
return False
i -= 1; j -= 1
i = row; j = col
while i <= e_i and j <= e_j:
if matrix[i][j] == 'Q':
return False
i += 1; j += 1
i = row; j = col
while i >= 0 and j <= e_j:
if matrix[i][j] == 'Q':
return False
i -= 1; j += 1
i = row; j = col
while i <= e_i and j >= 0:
if matrix[i][j] == 'Q':
return False
i += 1; j -= 1
return True
def getStrsFromMatrix(matrix):
res = []
for row in matrix:
res.append(''.join(row))
return res
matrix = [[ '.' for j in range(n)] for i in range(n)]
res = []
index = n - 1
solveNQueensHelper(matrix, index)
return res
| [
"noreply@github.com"
] | shivasupraj.noreply@github.com |
c77f17c90c2e269b76573233eba64697c0f2bdea | bf62bdb3f2991ee000c056246bf08e5797154561 | /docs/conf.py | 2750230ed2fed90e8ddce80a431b159a834b42ea | [
"MIT"
] | permissive | wlc123456/fsmlite | ec0e9f5c800a4301fb5e23c35885b67064295190 | d785362578a9cb28eda50f664d7ea866b29c8606 | refs/heads/master | 2023-06-21T06:32:46.883165 | 2021-08-02T20:07:44 | 2021-08-02T20:07:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,631 | py | import os
def get_version(filename):
    """Extract the fsmlite version from an Autoconf AC_INIT() declaration."""
    import re
    with open(filename) as handle:
        # AC_INIT([name], [version], ...) -> {name: version}
        pairs = re.findall(r'AC_INIT\(\[(.*?)\],\s*\[(.*?)\].*\)', handle.read())
    return dict(pairs)['fsmlite']
# https://github.com/rtfd/readthedocs.org/issues/388
if os.environ.get('READTHEDOCS', None) == 'True':
from subprocess import call
call('doxygen')
# TODO: extract from autoconf?
project = 'fsmlite'
author = 'Thomas Kemmer'
version = get_version(b'../configure.ac')
release = version
copyright = '%s, %s' % ('2015-2021', author)
master_doc = 'index'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'breathe',
]
breathe_projects = { project: 'doxyxml/' }
breathe_default_project = project
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'ReadTheDocs-Breathedoc'
# -- Options for LaTeX output ---------------------------------------------
# All keys left at their Sphinx defaults; commented entries document them.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, project + '.tex', project + ' Documentation', author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Single man page in section 3 (library calls).
man_pages = [
    (master_doc, project, project + ' Documentation', [author], 3),
]
# If true, show URL addresses after external links.
#man_show_urls = False
| [
"tkemmer@computer.org"
] | tkemmer@computer.org |
4c6488ab6aa55d852b4537853720c95c261c80b6 | 9f2445e9a00cc34eebcf3d3f60124d0388dcb613 | /2019-08-19-GHKinMOOSE/somasimp_MOOSE_ghk.py | c0f301e8c05d263532c28fee43c2de51ded85a5e | [] | no_license | analkumar2/Thesis-work | 7ee916d71f04a60afbd117325df588908518b7d2 | 75905427c2a78a101b4eed2c27a955867c04465c | refs/heads/master | 2022-01-02T02:33:35.864896 | 2021-12-18T03:34:04 | 2021-12-18T03:34:04 | 201,130,673 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # exec(open('somasimp_MOOSE_ghk.py').read())
import moose
import numpy as np
import matplotlib.pyplot as plt
import rdesigneur as rd
# Best-effort cleanup of any model tree left over from a previous run;
# ignore the error raised when the path does not exist yet.
try:
    # [moose.delete(x) for x in ['/model', '/library']]
    moose.delete('/model')
except Exception:  # narrower than bare "except:", which also traps SystemExit/KeyboardInterrupt
    pass

ChP = 'simplechan_ghk'  # channel prototype module name

F = 96485.3329  # Faraday constant, C/mol

# Soma geometry: cylinder of 500 um diameter x 100 um length (SI units).
sm_diam = 500e-6
sm_len = 100e-6
sm_vol = np.pi/4*sm_diam**2*sm_len   # cylinder volume, m^3
sm_area = np.pi*sm_diam*sm_len       # lateral surface area, m^2
| [
"analkumar2@gmail.com"
] | analkumar2@gmail.com |
53ec99187d84b613f4bf21c21ebb1ab6f074405a | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_webfilter_ips_urlfilter_setting.py | d05b42141aabf1983b8f58f7b7a06b67fc4c0d44 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 7,628 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_ips_urlfilter_setting
short_description: Configure IPS URL filter settings.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure webfilter feature and ips_urlfilter_setting category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
webfilter_ips_urlfilter_setting:
description:
- Configure IPS URL filter settings.
default: null
suboptions:
device:
description:
- Interface for this route. Source system.interface.name.
distance:
description:
- Administrative distance (1 - 255) for this route.
gateway:
description:
- Gateway IP address for this route.
geo-filter:
description:
- Filter based on geographical location. Route will NOT be installed if the resolved IP address belongs to the country in the filter.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPS URL filter settings.
fortios_webfilter_ips_urlfilter_setting:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
webfilter_ips_urlfilter_setting:
device: "<your_own_value> (source system.interface.name)"
distance: "4"
gateway: "<your_own_value>"
geo-filter: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
# Module-level FortiOSAPI handle; created in main() and shared by the helpers.
fos = None
def login(data):
    """Open an authenticated FortiOS API session on the global ``fos`` handle.

    Enables debug output, selects HTTP/HTTPS (HTTPS unless explicitly
    disabled via a falsy "https" flag) and logs in with the supplied
    credentials.  Side effects only; returns None.
    """
    host, username, password = data['host'], data['username'], data['password']
    fos.debug('on')
    # HTTPS is the default; only an explicit falsy "https" value turns it off.
    fos.https('off' if not data.get('https', True) else 'on')
    fos.login(host, username, password)
def filter_webfilter_ips_urlfilter_setting_data(json):
    """Project *json* onto the option keys the FortiOS endpoint accepts.

    Keys that are absent or mapped to None are dropped so they are not
    sent to the device; falsy-but-set values (0, "", False) are kept.

    :param json: dict of candidate settings (may contain extra keys)
    :return: dict restricted to the supported, non-None options
    """
    option_list = ['device', 'distance', 'gateway',
                   'geo-filter']
    # Dict comprehension replaces the original build-up loop (same result).
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def webfilter_ips_urlfilter_setting(data, fos):
    """Push the IPS URL filter settings from *data* to the device.

    :param data: module parameters; uses "vdom" and the
        "webfilter_ips_urlfilter_setting" sub-dict
    :param fos: connected FortiOSAPI instance
    :return: raw API response dict from fos.set()
    """
    raw_settings = data['webfilter_ips_urlfilter_setting']
    return fos.set('webfilter',
                   'ips-urlfilter-setting',
                   data=filter_webfilter_ips_urlfilter_setting_data(raw_settings),
                   vdom=data['vdom'])
def fortios_webfilter(data, fos):
    """Log in, dispatch the requested webfilter operation and log out.

    :param data: module parameters
    :param fos: FortiOSAPI instance (not yet logged in)
    :return: (is_error, has_changed, response) tuple for AnsibleModule
    """
    login(data)

    methodlist = ['webfilter_ips_urlfilter_setting']
    for method in methodlist:
        if data[method]:
            # Look the handler up in the module namespace instead of
            # eval()-ing an arbitrary string.
            resp = globals()[method](data, fos)
            break

    fos.logout()
    # NOTE(review): if no method key is truthy, "resp" is unbound and the
    # line below raises NameError -- behavior preserved from the original.
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible module entry point: parse arguments and run the webfilter task."""
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "webfilter_ips_urlfilter_setting": {
            "required": False, "type": "dict",
            "options": {
                "device": {"required": False, "type": "str"},
                "distance": {"required": False, "type": "int"},
                "gateway": {"required": False, "type": "str"},
                "geo-filter": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # Imported lazily so a missing dependency fails the module cleanly
    # instead of raising an ImportError traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # The helper functions (login, ...) use the module-level "fos" handle.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_webfilter(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| [
"skydevapp@gmail.com"
] | skydevapp@gmail.com |
f331c316f8a15c4b36891d58db96e4389d96c074 | daaf7e8ff13844ab31ce774029377d1cbfe21e26 | /Month 02/Week 01/Day 05/c.py | eecfb554f86114eafbd5c3875d304aafb081b5ba | [
"MIT"
] | permissive | KevinKnott/Coding-Review | 7ae370937879179b0daec3ebd91525dc89c498e9 | 6a83cb798cc317d1e4357ac6b2b1fbf76fa034fb | refs/heads/main | 2023-07-18T09:07:53.100342 | 2021-09-03T23:26:55 | 2021-09-03T23:26:55 | 373,020,791 | 0 | 0 | MIT | 2021-09-03T23:26:56 | 2021-06-02T02:51:12 | Python | UTF-8 | Python | false | false | 3,044 | py | # First Missing Positive: https://leetcode.com/problems/first-missing-positive/
# Given an unsorted integer array nums, find the smallest missing positive integer.
# You must implement an algorithm that runs in O(n) time and uses constant extra space.
# The brute force of this is to look through all of nums for each 1 -> k until you find the missing k
# this is quite bad as you will need to duplicate lots of work
# Alternatively you could sort the array and do the same thing as you know that you can quickly
# find the first missing by moving i and a left pointer across; this is O(n log n)
# The problem suggests that we could do this in o(n) with constant space and I believe the way to
# do this is to simply swap the value to negative if we have seen it and the first index that
# isn't negative is the one that is missing
class Solution:
    """In-place, O(n) time / O(1) extra space "First Missing Positive"."""

    def firstMissingPositive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        The array itself is the presence table: after every non-positive
        entry is neutralised to 1, seeing the value v flips the sign of
        nums[v - 1].  The first index left positive names the answer.
        Mutates *nums* in place.
        """
        size = len(nums)

        # Pass 1: remember whether 1 occurs at all, and neutralise
        # non-positive entries (they would break the sign-marking trick).
        one_seen = False
        for idx in range(size):
            if nums[idx] == 1:
                one_seen = True
            if nums[idx] <= 0:
                nums[idx] = 1
        if not one_seen:
            return 1
        # Only [1] remains possible at length 1, so 2 is missing.
        if size == 1:
            return 2

        # Pass 2: mark value v (1..size) by negating nums[v - 1].
        for idx in range(size):
            value = abs(nums[idx])
            if value - 1 < size and nums[value - 1] > 0:
                nums[value - 1] = -nums[value - 1]

        # Pass 3: the first still-positive slot is the missing value.
        for idx in range(size):
            if nums[idx] > 0:
                return idx + 1
        return size + 1
# My initial intuiton on this problem was correct there are a few weird things we want to take care of that I hadn't considered
# If we have 0 or a negative value we need to remove them but how do we do so?
# The answer is we check if we have one and then make all those values = to 1 so that we know what to add these values with
# Now is there other optimizations on this problem? For sure by using a set we can have an o(1) look up so just loop
# n times and add all numbers to a set and then simply loop from 1 -> k and see if it is in set which will run in o(k) time and o(n) space
# Score Card
# Did I need hints? Nope
# Did you finish within 30 min? 30
# Was the solution optimal? See above
# Were there any bugs? i forgot to add a return that says len(nums) + 1 at the end in case we have every number from 1 -> len(nums)
# 5 3 5 3 = 4
| [
"KKnoTT93@gmail.com"
] | KKnoTT93@gmail.com |
89034088048e3eb1ec1128d148ff89ec503144b2 | ab397ff967a4ea0c0bf3b1ab286556071115b197 | /Analysis/checker_files.py | 0eebcdc0b01247c60b8074c9d3125d02d14fec68 | [] | no_license | GihanMora/Accelerometer_podiatry_gihan | 404d9db9da618259de1c484f6783e8b4c7bf8f23 | e2164574593159a89465e8d23c838fd9c7ed758a | refs/heads/master | 2023-01-29T02:22:56.692869 | 2020-12-08T05:25:49 | 2020-12-08T05:25:49 | 317,770,532 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import os
# Build, for each participant 1..23, the pair of paths
# [60-sec AGD epoch-details export, predictions.csv] keyed by participant id.
root_folder = 'E:/Data/Accelerometer_Dataset_Rashmika/Podiatry Data/'
pred_root = 'E:/Data/Accelerometer_Dataset_Rashmika/Podiatry Data/Predictions/'

data_dict = {}
for i in range(1,24):
    # Participant 4 is excluded -- presumably missing/unusable data; confirm.
    if(i==4):continue
    f_path = os.path.join(root_folder,str(i))
    file_list = os.listdir(f_path)
    d_file_path = ''
    # Prediction folders are named with the id zero-padded to three digits.
    if (i < 10):
        st = 'Participant_00' + str(i)
    else:
        st = 'Participant_0' + str(i)
    for each_p in file_list:
        # Pick the 60-second-epoch AGD details export for this participant;
        # if several match, the last one listed wins.
        if('60sec AGD Details Epochs' in each_p):
            d_file_path = os.path.join(root_folder,str(i),each_p)
    pred_f_path = os.path.join(pred_root,st,'predictions.csv')
    data_dict[i]=[d_file_path,pred_f_path]
    # print(raw_file_path)
print(data_dict) | [
"gihangamage.15@cse.mrt.ac.lk"
] | gihangamage.15@cse.mrt.ac.lk |
1c332b2db8cfb4a7bffc0f933f0c234f05ffc3ac | 99bfa15723593ea351191d82fac80e36ab25aab1 | /CP/Stack/Stock_Span.py | c2e5316c58512b0747c2d1f1b6bc799e57488071 | [] | no_license | Kartavya-verma/Python-Projects | f23739ef29eab67a8e25569e3f7bf110e42576cb | 02ffe926a7ed82bc783e4c4034a2fa53d4d1a870 | refs/heads/master | 2023-06-22T07:59:39.595084 | 2021-07-18T15:51:55 | 2021-07-18T15:51:55 | 387,139,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | l = [100, 80, 60, 70, 60, 75, 85]
# op = [1, 1, 1, 2, 1, 4, 6]

# Stock span via a monotonic stack: first record, for every day, the index
# of the previous day with a strictly greater price (-1 if none); the span
# is then the distance back to that day.
st = []   # stack of [price, index]; prices strictly decrease top-down
res = []  # previous-greater index per day (-1 when none exists)
for day, price in enumerate(l):
    # Drop every stacked price that is <= today's price.
    while len(st) > 0 and st[-1][0] <= price:
        st.pop()
    res.append(st[-1][1] if len(st) > 0 else -1)
    st.append([price, day])

# Convert previous-greater indices into spans.
for day in range(len(l)):
    res[day] = day - res[day]
print(res)
"vermakartavya2000@gmail.com"
] | vermakartavya2000@gmail.com |
a0dcbf8c261c18e893a3f65fd7dc1ee8185ade11 | db09f9fc5729eec823ee859fd9bac7461cce60f5 | /src/conf/urls.py | 6a3f40afd62d8a5d11e124316170f6c28da813d4 | [] | no_license | cephey/rent | 9323ca42ea3a8d34bf1ea4b6ffaf0386782d19a3 | 7eabae2f0df8756a05553df3b8e3b072459dcb51 | refs/heads/master | 2016-09-10T10:16:19.420888 | 2014-10-12T20:02:34 | 2014-10-12T20:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #coding:utf-8
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.conf import settings
# Project-wide URL routing: app includes plus grappelli/admin.
# NOTE: patterns() is the pre-Django-1.10 style; the '' prefix argument is unused.
urlpatterns = patterns(
    '',
    url(r'^api/', include('api.urls')),
    url(r'', include('pages.urls', namespace='pages')),
    url(r'^users/', include('users.urls', namespace='users')),
    url(r'^inventory/', include('inventory.urls', namespace='inventory')),
    url(r'^grappelli/', include('grappelli.urls')),
    url(r'^admin/', include(admin.site.urls)),
)

# Serve user-uploaded media files (no-op outside of development).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"andrey.ptitsyn86@gmail.com"
] | andrey.ptitsyn86@gmail.com |
af3df78cdc812556d8b6fdf80fefcf41787f4be5 | 6cdab6432f37471f1c7ce74f461568accf04aa85 | /dev/a_import.py | 1d8cbb6f3b631ea3b1a2a93d0a340d0734922eaa | [
"Apache-2.0"
] | permissive | exaxorg/Kaggle-CORD19-data-parser | 71709d549f2be108b0737d252547ad94f9b3dcb4 | bf78daee155645aa3dd7f58dc9dcc2f55cdc0092 | refs/heads/master | 2022-04-25T04:09:27.114630 | 2020-03-30T19:17:48 | 2020-04-29T20:29:15 | 258,124,864 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | from json import loads
from glob import glob
from os.path import join
options = dict(directory='')
datasets = ('previous',)
def parse(data):
    """Flatten one CORD-19 article dict (parsed JSON) into a row dict.

    Returns a dict with keys sha, title, abstract (tuple of paragraph
    texts), body (tuple of (section, text) pairs), bib (ref -> title),
    ref (ref -> text) and back (tuple of paragraph texts).
    """
    # 'abstract' may be missing entirely; keep whatever falsy value we
    # got in that case, otherwise extract the paragraph texts.
    paragraphs = data.get('abstract', ())
    if paragraphs:
        paragraphs = tuple(p['text'] for p in data['abstract'])
    return {
        'sha': data['paper_id'],
        'title': data['metadata']['title'],
        'abstract': paragraphs,
        'body': tuple((p['section'], p['text']) for p in data['body_text']),
        'bib': {key: entry['title'] for key, entry in data['bib_entries'].items()},
        'ref': {key: entry['text'] for key, entry in data['ref_entries'].items()},
        'back': tuple(p['text'] for p in data['back_matter']),
    }
def prepare(job):
    """Collect the input JSON filenames and set up the dataset writer.

    prepare() is run first, then "analysis()", and then "synthesis()",
    if they exist.  The returned tuple is handed to analysis() as
    ``prepare_res``.
    """
    path = join(job.input_directory, options.directory)
    filenames = sorted(glob(join(path, '*.json')))
    assert len(filenames) > 0, 'No json-files in directory \"%s\"' % (path,)
    # create a datasetwriter instance
    dw = job.datasetwriter(previous=datasets.previous, filename=path) # path, since no single filename exists
    dw.add('sha', 'unicode')
    dw.add('title', 'unicode')
    dw.add('abstract', 'json') # (text, ...)
    dw.add('body', 'json') # ((section, text), ...)
    dw.add('bib', 'json') # {ref:title, ...}
    dw.add('ref', 'json') # {ref:text, ...}
    dw.add('back', 'json') # (text, ...)
    dw.add('filename', 'unicode')
    dw.add('exabstract', 'unicode') # lowercase joined string
    return filenames, dw
def analysis(prepare_res, sliceno, slices):
    """Parse this slice's share of the JSON files and write dataset rows.

    analysis() is parallelised into "slices" processes; "sliceno" is the
    unique process id of this slice.
    """
    filenames, dw = prepare_res # output from prepare()
    filenames = filenames[sliceno::slices] # pick a (1/slices)-fraction of all filenames for this slice
    for fn in filenames:
        with open(fn, 'rt') as fh:
            data = loads(fh.read())
        v = parse(data)
        # lowercase version of the abstract with newlines escaped, then
        # paragraphs joined by a literal "\n"
        example_cleaned_abstract = '\\n'.join(x.lower().replace('\n', '\\n') for x in v['abstract'])
        dw.write(v['sha'], v['title'], v['abstract'], v['body'], v['bib'], v['ref'], v['back'], fn,
            example_cleaned_abstract,
        )
| [
"anders@berkeman.org"
] | anders@berkeman.org |
e3b79ede83977fda5769cf4b0b28fd40d04f1465 | bf0d0b123c1a42a1db5b174ceac9d10d8f63d923 | /10.py | a4de318300bcee1d2f23e0175e39ed32623e8043 | [] | no_license | 1751660300/lanqiaobei | 30fd047a788288b85499f4ee754d60333cb5bb51 | 4f83f2cb5a15e0af3c220aae62a821b7fbfd2e1f | refs/heads/master | 2022-12-31T13:41:41.637481 | 2020-10-16T13:30:38 | 2020-10-16T13:30:38 | 304,635,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | # -*- coding:utf-8 -*-
# 问题描述
# 小明和朋友们一起去郊外植树,他们带了一些在自己实验室精心研究出的小树苗。
# 小明和朋友们一共有 n 个人,他们经过精心挑选,在一块空地上每个人挑选了一个适合植树的位置,总共 n 个。他们准备把自己带的树苗都植下去。
# 然而,他们遇到了一个困难:有的树苗比较大,而有的位置挨太近,导致两棵树植下去后会撞在一起。
# 他们将树看成一个圆,圆心在他们找的位置上。如果两棵树对应的圆相交,这两棵树就不适合同时植下(相切不受影响),称为两棵树冲突。
# 小明和朋友们决定先合计合计,只将其中的一部分树植下去,保证没有互相冲突的树。他们同时希望这些树所能覆盖的面积和(圆面积和)最大。
# 输入格式
# 输入的第一行包含一个整数 n ,表示人数,即准备植树的位置数。
# 接下来 n 行,每行三个整数 x, y, r,表示一棵树在空地上的横、纵坐标和半径。
# 输出格式
# 输出一行包含一个整数,表示在不冲突下可以植树的面积和。由于每棵树的面积都是圆周率的整数倍,请输出答案除以圆周率后的值(应当是一个整数)。
# 样例输入
# 6
# 1 1 2
# 1 4 2
# 1 7 2
# 4 1 2
# 4 4 2
# 4 7 2
# 样例输出
# 12
# 评测用例规模与约定
# 对于 30% 的评测用例,1 <= n <= 10;
# 对于 60% 的评测用例,1 <= n <= 20;
# 对于所有评测用例,1 <= n <= 30,0 <= x, y <= 1000,1 <= r <= 1000。 | [
"1751660300@qq.com"
] | 1751660300@qq.com |
d77f4e135813dcb4ab39913514dd69377bc0b9ec | c1bed5203d73a077ff83b96cc6fe71a3fce734e7 | /python/pe817.py | f824512558711e81ca12042c4c1cd4502f4a4be2 | [] | no_license | arknave/project-euler | 41006e571e24d8aa077247bdb806be3ba8818c40 | 647783721d4d019c41ec844500069baeab6d8a44 | refs/heads/master | 2022-12-23T06:36:14.647022 | 2022-12-14T23:20:49 | 2022-12-14T23:20:49 | 19,319,963 | 0 | 1 | null | 2015-05-21T06:42:41 | 2014-04-30T16:28:50 | Python | UTF-8 | Python | false | false | 1,479 | py | from math import isqrt
def digits(x, b):
    """Yield the base-*b* digits of *x*, least significant first.

    Yields nothing when x <= 0.
    """
    value = x
    while value > 0:
        value, digit = divmod(value, b)
        yield digit
def solve(b, d):
    # Find small candidates x such that x^2 has d as a digit in base b.
    # units digit: solve x^2 = d (mod b)
    # every other digit: solve floor(x^2 / b^i) = aM + d for increasing a, i
    # Theory: only have to check last two digits
    """
    floor(x^2 / b^i) = k
    x^2 / b^i >= k
    x^2 >= b^i k
    x >= sqrt(b^i k)
    """
    opts = []
    # first, the units digit
    # because b % 4 == 3, this has an easy closed form
    # (for primes b = 3 mod 4, d^((b+1)/4) is a square root of d when one exists)
    assert b % 4 == 3
    r = pow(d, (b + 1) // 4, b)
    if r * r % b == d:
        opts.append(r)
    # the other square root mod b, if the first one worked
    r = -r % b
    if r * r % b == d:
        opts.append(r)
    opts.sort()
    # Scan targets term = d*b, d*b + b^2, d*b + 2*b^2, ... i.e. squares whose
    # second-lowest base-b digit is d, stopping once the best unit-digit
    # candidate already beats anything this scan could still produce.
    lead = 0
    b2 = b * b
    term = b * d
    while not opts or opts[-1] * opts[-1] > term:
        # smallest x with x^2 >= term (ceil of the square root)
        x = isqrt(term)
        while x * x < term:
            x += 1
        if (x * x // b) % b == d:
            opts.append(x)
            break
        lead += 1
        term += b2
    # now try b^2 just for funsies
    # x^2 = d b^2 + ....
    x = isqrt(d * b * b)
    while x * x < d * b * b:
        x += 1
    if x * x // b // b == d:
        opts.append(x)
    return opts
def main():
    """For every d in 1..100000 find the smallest x whose square contains
    the base-(10^9+7) digit (10^9+7)-d, and print the sum of those x."""
    base = 1000000007
    total = 0
    for d in range(1, 100001):
        target = base - d
        candidates = solve(base, target)
        smallest = min(candidates)
        # sanity check: the chosen square really contains the digit
        assert target in list(digits(smallest * smallest, base)), (target, smallest, candidates)
        total += smallest
    print(total)
if __name__ == '__main__':
main()
| [
"arnavsastry@gmail.com"
] | arnavsastry@gmail.com |
4742ec7b9ab060fc24f0884102ef783e8649474f | 65dce36be9eb2078def7434455bdb41e4fc37394 | /Topological Sorting.py | 921b932601ff357adecdcf18529150d2675a7752 | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | '''
Given an directed graph, a topological order of the graph nodes is defined as follow:
For each directed edge A -> B in graph, A must before B in the order list.
The first node in the order can be any node in the graph with no nodes direct to it.
Find any topological order for the given graph.
'''
# Definition for a Directed graph node
# class DirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    """
    @param graph: A list of Directed graph node
    @return: A list of graph nodes in topological order.
    """
    def topSort(self, graph):
        """Return one topological order of *graph* (Kahn's algorithm).

        Repeatedly emits nodes whose in-degree has dropped to zero.
        Runs in O(V + E): ``deque.popleft`` keeps each dequeue O(1),
        where the original ``list.pop(0)`` made the loop quadratic.
        """
        from collections import deque

        # Count incoming edges; nodes absent from this map have in-degree 0.
        indegree = {}
        for node in graph:
            for neighbor in node.neighbors:
                indegree[neighbor] = indegree.get(neighbor, 0) + 1

        result = []
        queue = deque()
        # Seed with the nodes nothing points to: they may come first.
        for node in graph:
            if node not in indegree:
                queue.append(node)
                result.append(node)

        while queue:
            current = queue.popleft()
            # "Remove" current's outgoing edges; enqueue any neighbor
            # whose last incoming edge just disappeared.
            for neighbor in current.neighbors:
                indegree[neighbor] -= 1
                if indegree[neighbor] == 0:
                    queue.append(neighbor)
                    result.append(neighbor)
        return result
"yiyun.tan@uconn.edu"
] | yiyun.tan@uconn.edu |
9990ac14503554e7dfba877b9302e17cb6a15d18 | aa480d8b09dd7ad92c37c816ebcace24a35eb34c | /5/305.岛屿数量-ii.py | 69c314ac62e7c60b139ff01357ff1cb290dbbee6 | [] | no_license | SR2k/leetcode | 7e701a0e99f9f05b21216f36d2f5ac07a079b97f | de131226159865dcb7b67e49a58d2ddc3f0a82c7 | refs/heads/master | 2023-03-18T03:37:02.916453 | 2022-09-16T01:28:13 | 2022-09-16T01:28:13 | 182,083,445 | 0 | 0 | null | 2023-03-08T05:44:26 | 2019-04-18T12:27:12 | Python | UTF-8 | Python | false | false | 3,363 | py | #
# @lc app=leetcode.cn id=305 lang=python3
#
# [305] 岛屿数量 II
#
# https://leetcode.cn/problems/number-of-islands-ii/description/
#
# algorithms
# Hard (40.90%)
# Likes: 129
# Dislikes: 0
# Total Accepted: 6.4K
# Total Submissions: 15.6K
# Testcase Example: '3\n3\n[[0,0],[0,1],[1,2],[2,1]]'
#
# 给你一个大小为 m x n 的二进制网格 grid 。网格表示一个地图,其中,0 表示水,1 表示陆地。最初,grid
# 中的所有单元格都是水单元格(即,所有单元格都是 0)。
#
# 可以通过执行 addLand 操作,将某个位置的水转换成陆地。给你一个数组 positions ,其中 positions[i] = [ri, ci]
# 是要执行第 i 次操作的位置 (ri, ci) 。
#
# 返回一个整数数组 answer ,其中 answer[i] 是将单元格 (ri, ci) 转换为陆地后,地图中岛屿的数量。
#
# 岛屿 的定义是被「水」包围的「陆地」,通过水平方向或者垂直方向上相邻的陆地连接而成。你可以假设地图网格的四边均被无边无际的「水」所包围。
#
#
# 示例 1:
#
#
# 输入:m = 3, n = 3, positions = [[0,0],[0,1],[1,2],[2,1]]
# 输出:[1,1,2,3]
# 解释:
# 起初,二维网格 grid 被全部注入「水」。(0 代表「水」,1 代表「陆地」)
# - 操作 #1:addLand(0, 0) 将 grid[0][0] 的水变为陆地。此时存在 1 个岛屿。
# - 操作 #2:addLand(0, 1) 将 grid[0][1] 的水变为陆地。此时存在 1 个岛屿。
# - 操作 #3:addLand(1, 2) 将 grid[1][2] 的水变为陆地。此时存在 2 个岛屿。
# - 操作 #4:addLand(2, 1) 将 grid[2][1] 的水变为陆地。此时存在 3 个岛屿。
#
#
# 示例 2:
#
#
# 输入:m = 1, n = 1, positions = [[0,0]]
# 输出:[1]
#
#
#
#
# 提示:
#
#
# 1 <= m, n, positions.length <= 10^4
# 1 <= m * n <= 10^4
# positions[i].length == 2
# 0 <= ri < m
# 0 <= ci < n
#
#
#
#
# 进阶:你可以设计一个时间复杂度 O(k log(mn)) 的算法解决此问题吗?(其中 k == positions.length)
#
#
# @lc code=start
from unittest import result
Point = tuple[int, int]
DIRECTIONS = (0, 1), (0, -1), (1, 0), (-1, 0)
class Solution:
    def numIslands2(self, m: int, n: int, positions: list[list[int]]) -> list[int]:
        """Return the island count after each addLand operation.

        Union-find over land cells: each new cell is unioned with every
        adjacent land cell, and each merge of two distinct components
        decrements the island count by one.  ``find_root`` now applies
        path compression, so each find is amortized near-O(1) and the
        whole run is ~O(k * alpha(mn)) for k operations (meeting the
        problem's O(k log(mn)) follow-up).

        Re-adding an existing land cell just repeats the current count.
        """
        directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
        parent: dict = {}       # cell -> parent cell; roots map to themselves
        land_set: set = set()   # all cells turned into land so far

        def find_root(p):
            # Walk to the root, then compress the path so subsequent
            # finds on these cells are a single hop.
            root = p
            while parent[root] != root:
                root = parent[root]
            while parent[p] != root:
                nxt = parent[p]
                parent[p] = root
                p = nxt
            return root

        result = []
        count = 0
        for i, j in positions:
            if (i, j) in land_set:
                # Duplicate operation: nothing changes.
                result.append(count)
                continue
            roots = set()
            for di, dj in directions:
                ni, nj = i + di, j + dj
                if 0 <= ni < m and 0 <= nj < n and (ni, nj) in land_set:
                    roots.add(find_root((ni, nj)))
            land_set.add((i, j))
            if not roots:
                # Isolated cell: a brand-new island.
                count += 1
                parent[(i, j)] = (i, j)
            else:
                # Merge the new cell and all adjacent components into one.
                roots = list(roots)
                parent[(i, j)] = roots[0]
                count -= len(roots) - 1
                for r in roots:
                    parent[r] = roots[0]
            result.append(count)
        return result
# @lc code=end
| [
"luozhou.csy@alibaba-inc.com"
] | luozhou.csy@alibaba-inc.com |
1623d3fbdd87faa2a862b7d957d42eaf13032d77 | 464338d9556cf9892a4647ab1b94dd9d2879274d | /ohealth/ohealth_hospital_unit/ohealth_hospital_unit.py | 4ef944070d5a58afd98081ca967b21ab42611a0e | [] | no_license | linzheyuan/O-health | 5199637de8343508c5fe2d8e611a394cf39d1673 | eab3fc74ee7b878dbcc8234597de053de9d9e608 | refs/heads/master | 2021-01-24T21:36:14.068894 | 2013-07-07T12:34:28 | 2013-07-07T12:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | # -*- coding: utf-8 -*-
#/#############################################################################
#
# HITSF
#
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
from osv import osv
from osv import fields
class OHealthHospitalUnit(osv.Model):
    """A hospital unit (e.g. Neonatal, Intensive Care) inside an institution."""
    _name = 'ohealth.hospital.unit'
    _columns = {
        # short internal identifier for the unit
        'code': fields.char(size=8, string='Code'),
        # partner record of the medical center this unit belongs to
        'institution': fields.many2one('res.partner', string='Institution',
            help='Medical Center'),
        'name': fields.char(size=256, string='Name', required=True,
            help='Name of the unit, eg Neonatal, Intensive Care, ...'),
        'extra_info': fields.text(string='Extra Info'),
    }
# old-style OpenERP model registration
OHealthHospitalUnit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"rizabisnis@gmail.com"
] | rizabisnis@gmail.com |
ad89fbcf188dc5000de65e2e77c87eac31f4c8e9 | 814a896307bc2b99ec20d0800cb106280fb1b303 | /venv/lib/python3.6/site-packages/pyquickhelper/__init__.py | b38ca337518389f3a4cc1084900555517420ce55 | [] | no_license | mahagala/HecHms | ae0d4bedfcba33bc7e70eeefadcbd5361a00bd73 | 47521f9cd8dc0f2a51bb6e2660f67a81b3634b16 | refs/heads/master | 2021-09-20T04:16:23.691577 | 2018-08-03T12:46:38 | 2018-08-03T12:46:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | # -*- coding: utf-8 -*-
"""
@file
@brief Module *pyquickhelper*.
Helpers to produce documentation, test notebooks, walk through files,
sphinx extension, jenkins helpers...
"""
__version__ = "1.8"
__author__ = "Xavier Dupré"
__github__ = "https://github.com/sdpython/pyquickhelper"
__url__ = "http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/index.html"
__license__ = "MIT License"
__blog__ = """
<?xml version="1.0" encoding="UTF-8"?>
<opml version="1.0">
<head>
<title>blog</title>
</head>
<body>
<outline text="pyquickhelper"
title="pyquickhelper"
type="rss"
xmlUrl="http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/_downloads/rss.xml"
htmlUrl="http://www.xavierdupre.fr/app/pyquickhelper/helpsphinx/blog/main_0000.html" />
</body>
</opml>
"""
def check():
    """Run the library's self-diagnostics.

    Lazily imports the logging helpers and runs their sanity check,
    raising an exception if anything is broken.
    @return boolean (always ``True`` when no exception was raised)
    """
    from .loghelper import check_log
    check_log()
    return True
def _setup_hook(add_print=False, unit_test=False):
"""
if this function is added to the module,
the help automation and unit tests call it first before
anything goes on as an initialization step.
It should be run in a separate process.
@param add_print print *Success: _setup_hook*
@param unit_test used only for unit testing purpose
"""
# it can check many things, needed module
# any others things before unit tests are started
if add_print:
print("Success: _setup_hook")
def load_ipython_extension(ip):
    """Register this package's magic commands so that
    ``%load_ext pyquickhelper`` works.

    @param ip   IPython shell instance (from ``get_ipython()``)
    """
    from .ipythonhelper.magic_class_example import register_file_magics as reg_example
    from .ipythonhelper.magic_class_compress import register_file_magics as reg_compress
    from .ipythonhelper.magic_class_diff import register_file_magics as reg_diff
    from .ipythonhelper.magic_class_crypt import register_file_magics as reg_crypt
    # same registration order as before: example, compress, diff, crypt
    for register in (reg_example, reg_compress, reg_diff, reg_crypt):
        register(ip)
def get_fLOG(log=True):
    """Return a logging function.

    @param log  if True return @see fn fLOG, otherwise @see fn noLOG
    @return     function
    """
    if log:
        from .loghelper import fLOG as logger
    else:
        from .loghelper import noLOG as logger
    return logger
def get_insetup_functions():
    """Return the helpers used when a module includes C++ parts.

    @return tuple ``(must_build, run_build_ext)``

    .. versionadded:: 1.5
    """
    from .pycode.insetup_helper import must_build, run_build_ext
    return (must_build, run_build_ext)
| [
"hasithadkr7@gmail.com"
] | hasithadkr7@gmail.com |
94f892fb826285f19d4096ae7534ca287cd81c34 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/usersfunctions_beta/azext_usersfunctions_beta/vendored_sdks/usersfunctions/aio/operations/_users_mail_folders_child_folders_operations.py | 3c57de2889bfbd96a7a2045bf0eee4a11d50f55d | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 4,332 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsersMailFoldersChildFoldersOperations:
    """UsersMailFoldersChildFoldersOperations async operations.

    NOTE: this class is auto-generated by AutoRest; manual edits will be
    lost on regeneration.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_functions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def delta(
        self,
        user_id: str,
        mail_folder_id: str,
        **kwargs
    ) -> List["models.MicrosoftGraphMailFolder"]:
        """Invoke function delta.

        Invoke function delta.

        :param user_id: key: id of user.
        :type user_id: str
        :param mail_folder_id: key: id of mailFolder.
        :type mail_folder_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of MicrosoftGraphMailFolder, or the result of cls(response)
        :rtype: list[~users_functions.models.MicrosoftGraphMailFolder]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphMailFolder"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL from the path template stored on this method
        url = self.delta.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'mailFolder-id': self._serialize.url("mail_folder_id", mail_folder_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters (none for this operation)
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an HttpResponseError
        # carrying the deserialized OData error body.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[MicrosoftGraphMailFolder]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    delta.metadata = {'url': '/users/{user-id}/mailFolders/{mailFolder-id}/childFolders/microsoft.graph.delta()'}  # type: ignore
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
e6b0f5c7362a6bbaded744a6dfcf4a06283bb1a3 | 4c5ba474931fdb0a010229538cc92c121f5d01fa | /etc_graph/topology_sort.py | 2c5529a16f82577d25f544274d36b47fd8b81289 | [] | no_license | jeeHwon/Algorithm | f3d1569ec8e2c68810dee7915e9b50f4ebfb7f03 | 2ac3696b8cb5a987bbfbf5ad92433de94b172b25 | refs/heads/master | 2023-03-21T10:33:08.599024 | 2021-03-06T12:05:03 | 2021-03-06T12:05:03 | 335,676,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,099 | py | # 위상정렬(Topology Sort)
# 사이클이 없는 방향 그래프(DAG)의 모든 노드를 방향성에 거스르지 않도록 순서대로 나열 의미
# 집입차수(Indegree) : 특정한 노드로 들어오는 간선의 개수
# 진출차수(Outdegree) : 특정한 노드에서 나가는 간선의 개수
# 큐를 이용하여 구현 가능 (스택도 가능)
# 예시) 선수과목을 고려한 학습 순서 설정
# 여러가지 답 존재 가능, 모든 원소 방문 전 큐가 빈다면 사이크 존재한다고 판단가능
# 위상 정렬 위해 차례로 모든 노드를 확인하고, 각 노드에서 나가는 간선을 차례로 제거
# 따라서 시간 복잡도 => O(V+E)
from collections import deque
# Read the number of nodes and the number of edges
v, e = map(int, input().split())
# In-degree of every node starts at zero (nodes are 1-indexed)
indegree = [0] * (v+1)
# Adjacency list holding the outgoing edges of each node
graph = [[] for i in range(v+1)]

# Read every directed edge of the graph
for _ in range(e):
    a, b = map(int, input().split())
    graph[a].append(b) # edge from node A to node B
    # BUG FIX: increment the in-degree of the target node b.
    # The original `indegree += 1` tried to add an int to the list
    # itself and raised TypeError on the first edge.
    indegree[b] += 1
# 위상정렬 함수
def topology_sort():
    """Kahn's algorithm: print one topological order of the global graph."""
    result = [] # list collecting the nodes in topological order
    q = deque() # FIFO queue of nodes whose in-degree has reached zero
    # Start by enqueueing every node with in-degree zero
    for i in range(1, v+1):
        if indegree[i] == 0:
            q.append(i)
    # Repeat until the queue is empty
    while q:
        # Take the next ready node
        now = q.popleft()
        result.append(now)
        # Remove its outgoing edges: decrement each successor's in-degree
        for i in graph[now]:
            indegree[i] -= 1
            # Enqueue successors whose in-degree just reached zero
            if indegree[i] == 0:
                q.append(i)
    # Print the computed topological order
    for i in result:
        print(i, end=" ")
topology_sort()
| [
"jeehwon01@gmail.com"
] | jeehwon01@gmail.com |
2c53ed866dd566ad80cb5b2273903ac2a2e579ec | e2f507e0b434120e7f5d4f717540e5df2b1816da | /083-echo-try.py | dae2f22e522765dc20e3b59235b6cc283d637ab3 | [] | no_license | ash/amazing_python3 | 70984bd32ae325380382b1fe692c4b359ef23395 | 64c98940f8a8da18a8bf56f65cc8c8e09bd00e0c | refs/heads/master | 2021-06-23T14:59:37.005280 | 2021-01-21T06:56:33 | 2021-01-21T06:56:33 | 182,626,874 | 76 | 25 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | # A simple "echo"
# program
# Echo every input line back until input runs out.
while True:
    try:
        s = input()  # raises EOFError at end of input
        print(s)
    except:
        break  # any failure (typically EOF) ends the echo loop
# Now the program will
# simply stop after
# the end of input
| [
"andy@shitov.ru"
] | andy@shitov.ru |
21d43124852a540b2762bea05fac82cfa6f4f1dc | b5a6f10c886fba6584d2ac7b4a29c69975826dbb | /clients/python/test/test_constant_offer_filter_constant.py | 42b5144b273f936acdaf83f591d17f4b9965139d | [] | no_license | Patagona/pricemonitor-clients | 8c4f842ca3d4e459c77ac329ad488cb3e4c858bf | cf2d689bf9ed6ddea9501324cada918c3a88b4f8 | refs/heads/master | 2023-08-31T20:12:58.844253 | 2023-08-31T15:26:25 | 2023-08-31T15:26:25 | 279,618,794 | 1 | 1 | null | 2023-07-03T13:55:28 | 2020-07-14T15:09:38 | Python | UTF-8 | Python | false | false | 1,601 | py | # coding: utf-8
"""
Pricemonitor API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.0.6561
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import pricemonitor_api_client
from pricemonitor_api_client.models.constant_offer_filter_constant import ConstantOfferFilterConstant # noqa: E501
from pricemonitor_api_client.rest import ApiException
class TestConstantOfferFilterConstant(unittest.TestCase):
    """ConstantOfferFilterConstant unit test stubs (OpenAPI-generated)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a ConstantOfferFilterConstant for the tests.

        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included."""
        # model = pricemonitor_api_client.models.constant_offer_filter_constant.ConstantOfferFilterConstant()  # noqa: E501
        if include_optional :
            return ConstantOfferFilterConstant(
                value = True
            )
        else :
            return ConstantOfferFilterConstant(
            )

    def testConstantOfferFilterConstant(self):
        """Smoke test: both constructor variants must not raise."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
unittest.main()
| [
"jenkins@patagona.de"
] | jenkins@patagona.de |
cc48d3eb0c09c41a15f8a80ca313c4422069c678 | d98883fe1007111b8795ac5661e56758eca3b62e | /google-cloud-sdk/lib/surface/bigtable/app_profiles/update.py | 53b1e31c063dde9b7875f2512491bd5026cbb0c8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | pyDeb/KindnessCafe | 7303464e3c0693b0586a4a32740d8b9b19299caf | 6ff8dfe338aefd986edf67c382aff1a2920945d1 | refs/heads/master | 2022-12-29T16:16:35.796387 | 2021-04-19T00:03:14 | 2021-04-19T00:03:14 | 243,533,146 | 3 | 4 | null | 2022-12-08T09:48:09 | 2020-02-27T14:01:16 | Python | UTF-8 | Python | false | false | 2,595 | py | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""bigtable app profiles update command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py.exceptions import HttpError
from googlecloudsdk.api_lib.bigtable import app_profiles
from googlecloudsdk.api_lib.bigtable import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.bigtable import arguments
from googlecloudsdk.core import log
class UpdateAppProfile(base.CreateCommand):
  """Update a Bigtable app profile."""

  @staticmethod
  def Args(parser):
    # Register the app-profile resource plus the optional update flags
    # (--description, routing flags, --force, --async).
    arguments.AddAppProfileResourceArg(parser, 'to update')
    (arguments.ArgAdder(parser).AddDescription(
        'app profile', required=False).AddAppProfileRouting(
            required=False).AddForce('update').AddAsync())

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Raises:
      exceptions.ConflictingArgumentsException: If the user provides
        --transactional-writes and --route-any.

    Returns:
      Created resource.
    """
    app_profile_ref = args.CONCEPTS.app_profile.Parse()
    try:
      result = app_profiles.Update(
          app_profile_ref,
          cluster=args.route_to,
          description=args.description,
          multi_cluster=args.route_any,
          transactional_writes=args.transactional_writes,
          force=args.force)
    except HttpError as e:
      # Re-raises with a user-friendly message; control does not continue
      # to the else branch on failure.
      util.FormatErrorMessages(e)
    else:
      operation_ref = util.GetOperationRef(result)

      if args.async_:
        # --async: report the long-running operation and return immediately.
        log.UpdatedResource(
            operation_ref,
            kind='bigtable app profile {0}'.format(app_profile_ref.Name()),
            is_async=True)
        return result

      # Otherwise block until the update operation completes.
      return util.AwaitAppProfile(
          operation_ref,
          'Updating bigtable app profile {0}'.format(app_profile_ref.Name()))
| [
"zeus@localhost.localdomain"
] | zeus@localhost.localdomain |
516b7c00610a5542350c77943a7641dc3dd1fd04 | ea99544eef7572b194c2d3607fa7121cb1e45872 | /apps/report/migrations/0007_auto_20190408_0925.py | 117f656259e0e66f51eb509aad072678f43a5625 | [] | no_license | ash018/FFTracker | 4ab55d504a9d8ba9e541a8b682bc821f112a0866 | 11be165f85cda0ffe7a237d011de562d3dc64135 | refs/heads/master | 2022-12-02T15:04:58.543382 | 2019-10-05T12:54:27 | 2019-10-05T12:54:27 | 212,999,035 | 0 | 0 | null | 2022-11-22T03:58:29 | 2019-10-05T12:53:26 | Python | UTF-8 | Python | false | false | 770 | py | # Generated by Django 2.2 on 2019-04-08 09:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2: re-declares the choices/defaults of
    # two PositiveSmallIntegerField columns (choice ordering changed).

    dependencies = [
        ('report', '0006_auto_20190408_0646'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendanceindividual',
            name='status',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Present'), (0, 'Absent'), (2, 'Weekend'), (4, 'On Leave'), (3, 'Holiday')], default=0),
        ),
        migrations.AlterField(
            model_name='ranking',
            name='date_range',
            field=models.PositiveSmallIntegerField(choices=[(3, 'last_week'), (4, 'this_month'), (5, 'last_month'), (1, 'last_day'), (2, 'this_week')], default=2),
        ),
    ]
| [
"sadatakash018@gmail.com"
] | sadatakash018@gmail.com |
450246d63f312e11688d6d87ed06ce3a267602e8 | 3fdc6c7e280f31629a2bbf5f1e850786415e480c | /browser.py | fbc04bf55445e646429b701590988a4939afbb03 | [] | no_license | jmg/simple-web-browser | 970470957138922eb52d1cde2819f175f5d90eb3 | a3c8c55701cf7822149ceb7d6c5b686c9f8627fb | refs/heads/master | 2021-01-10T18:57:48.433231 | 2011-11-22T02:07:07 | 2011-11-22T02:07:07 | 2,399,458 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | from PyQt4 import QtCore, QtWebKit
from baseBrowser import BaseBrowser, BaseBrowserTab
from config import DEFAULTS
class Browser(BaseBrowser):
    """
    Main browser window built on BaseBrowser.

    Overrides the base-class callbacks for tab management and URL
    navigation.  ``self.ui`` (the widget tree) is created by
    BaseBrowser.__init__ -- presumably from a Qt Designer form; confirm
    in baseBrowser.py.
    """
    def __init__(self):
        # default_url must exist before BaseBrowser wires the signals,
        # since add_tab() immediately triggers browse().
        self.default_url = DEFAULTS['url']
        BaseBrowser.__init__(self)
        self.add_tab()

    def current_tab(self):
        """ Return the currently selected BrowserTab widget """
        return self.ui.tab_pages.currentWidget()

    def browse(self):
        """ Normalize the URL in the address bar and load it in the current tab """
        # Fall back to the configured default when the address bar is empty
        url = self.ui.tb_url.text() if self.ui.tb_url.text() else self.default_url
        # Prepend the configured protocol (e.g. http) when missing
        if not DEFAULTS['protocol'] in url:
            url = "%s://%s" % (DEFAULTS['protocol'], url)
        tab = self.current_tab()
        self.ui.tb_url.setText(url)
        tab.load_url(url)

    def add_tab(self):
        """ Open a new tab, select it and load the default/current URL """
        index = self.ui.tab_pages.addTab(BrowserTab(self.ui), "New Tab")
        self.ui.tab_pages.setCurrentIndex(index)
        self.ui.tb_url.setFocus()
        self.browse()

    def tab_closed(self, index):
        """ Triggered when the user closes a tab """
        self.ui.tab_pages.widget(index).deleteLater()
        # Closing the last remaining tab closes the whole window
        if self.ui.tab_pages.count() <= 1:
            self.ui.close()

    def tab_changed(self, index):
        """ Triggered when the current tab changes; sync the address bar """
        tab = self.current_tab()
        if tab is not None and tab.url is not None:
            self.ui.tb_url.setText(tab.url)

    def show(self):
        """ Show the main window """
        self.ui.show()
class BrowserTab(BaseBrowserTab):
    """
    One tab page: a QWebKit view (``self.html``) plus a load progress bar
    (``self.pg_load``), both presumably created by BaseBrowserTab --
    confirm in baseBrowser.py.  ``self.parent`` is the main window's UI.
    """
    def __init__(self, parent):
        BaseBrowserTab.__init__(self, parent)
        # last URL requested/visited by this tab; None until first load
        self.url = None

    def load_bar(self, value):
        """ Update the load progress bar (value in percent) """
        self.pg_load.setValue(value)

    def loaded_bar(self, state):
        """ Triggered when the page finishes loading """
        self.pg_load.hide()
        # Refresh this tab's title and favicon from the loaded page
        index = self.parent.tab_pages.indexOf(self)
        self.parent.tab_pages.setTabText(index, self.html.title())
        self.parent.tab_pages.setTabIcon(index, QtWebKit.QWebSettings.iconForUrl(QtCore.QUrl(self.url)))

    def load_start(self):
        """ Show the progress bar when a load begins """
        self.pg_load.show()

    def load_url(self, url):
        """ Load the requested url in the webview """
        self.url = url
        self.html.load(QtCore.QUrl(url))
        self.html.show()

    def url_changed(self, url):
        """ Keep the address bar and the stored URL in sync """
        if self.is_current():
            self.parent.tb_url.setText(self.url)
        self.url = url.toString()

    def back(self):
        """ Go back to the previous page """
        if self.is_current():
            self.html.back()

    def ahead(self):
        """ Go forward to the next page """
        if self.is_current():
            self.html.forward()

    def reload(self):
        """ Reload the current page """
        if self.is_current():
            self.html.reload()

    def is_current(self):
        """ Return True if this is the currently selected tab """
        return self is self.parent.tab_pages.currentWidget()
| [
"jmg.utn@gmail.com"
] | jmg.utn@gmail.com |
343405fb1a09d6270a46f3f31a045a175b213b87 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/1/test_20200606190302.py | 6ac7d1e8d3f7dff5414a65aed718dfe59f0e1101 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import os
import json
people = ['rfdfdf']
with open(os.path.dirname(os.path.realpath(__file__)) + "/save.json", 'r') as file:
people = json.load(file)
while True:
msg = input().encode('ascii')
if msg == "stop":
break
elif msg == "show all":
print(people)
else:
name = ""
game = ""
try:
name, game = msg.split(', ')
except Exception as er:
print("Err, try again with 2 params separated by ', '")
people.append({"name": name, "game": game})
with open(os.path.dirname(os.path.realpath(__file__)) + "/save.json", 'w') as file:
json.dump(people, file)
# print("STOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOP")
# for name in people:
# if name != "JoJo":
# print(name) | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
1f97ec440ba6c6a279915b778e547ad93602ac7e | 2b3bbfc742ad6a2529f2906193c3c5263ebd5fac | /tools/crc/crc_init.py | 35cab0578bc55530478de02b4d2b2470886733f6 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | foxBMS/foxbms-2 | 35502ef8441dfc7374fd6c0839e7f5328a5bda8f | 9eb6d1c44e63e43e62bbf6983b2e618fb6ad02cc | refs/heads/master | 2023-05-22T05:30:25.862475 | 2023-02-23T15:03:35 | 2023-02-24T15:04:15 | 353,751,476 | 151 | 80 | NOASSERTION | 2023-09-01T09:59:30 | 2021-04-01T15:52:24 | C | UTF-8 | Python | false | false | 5,199 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2023, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
"""Generate CRC lookup tables for usage in foxBMS"""
import argparse
import logging
import sys
LINE_LENGTH = 120
def get_hex_rep(table):
"""Generate nice printable hex representation of the lookup table"""
max_str_len = len(hex(max(table)))
hex_table = []
for i in table:
hex_rep = f"{i:#0{max_str_len}X}u"
hex_rep = hex_rep[:2].lower() + hex_rep[2:]
hex_table.append(hex_rep)
return hex_table
def generate_c_table(table, crc_len):
"""Generate a CRC table as the foxBMS C style guide requires"""
lines = [
f"/* precomputed CRC-{crc_len} Table */",
f"static const unsigned int crc{crc_len}Table[{len(table)}] = {{",
" ",
]
index = len(lines) - 1
for i in get_hex_rep(table):
if len(lines[index] + f"{i},") < LINE_LENGTH + 1:
lines[index] += f"{i}, "
else:
index += 1
lines.append(f" {i}, ")
lines.append("};")
print("\n".join(lines))
def precalculate_crc_table(polynomial, width):
"""Generate a CRC lookup table based on the polynomial"""
mask = 1 << (width - 1)
table = []
for i in range(256):
remainder = i << (width - 8)
for _ in range(8):
if remainder & mask:
remainder = (remainder << 1) ^ polynomial
else:
remainder <<= 1
remainder = remainder & 0xFFFF
table.append(remainder)
return table
def check_positive_integer(value):
"""Check that the provided value is castable to int"""
try:
value = int(value)
except ValueError:
sys.exit("Width must be an integer.")
if value <= 0:
sys.exit("Width must be a positive integer.")
return value
def check_hex(value):
"""Check that the provided value is a hex representation"""
if not value.lower().startswith("0x"):
sys.exit("Polynomial must be provided as hex representation.")
return value
def main():
"""This script does this and that"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
action="count",
default=0,
help="set verbosity level",
)
parser.add_argument(
"-p",
"--polynomial",
dest="polynomial",
action="store",
type=check_hex,
default="0xC599",
help="CRC polynomial",
)
parser.add_argument(
"-w",
"--width",
dest="width",
action="store",
type=check_positive_integer,
default=15,
help="CRC width",
)
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
polynomial = int(args.polynomial, 16)
width = int(args.width)
logging.debug(f"polynomial: {polynomial:#0x}")
logging.debug(f"width: {width}")
table = precalculate_crc_table(polynomial, width)
logging.debug("C code:")
generate_c_table(table, width)
if __name__ == "__main__":
main()
| [
"info@foxbms.org"
] | info@foxbms.org |
ff7aa44906bb0f492c773e8be7365b2fd77bfd3b | 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | /examples/adspygoogle/dfp/v201103/update_custom_targeting_values.py | d97cfc120d9432381cb5eec1c4073a5401b8ac01 | [
"Apache-2.0"
] | permissive | hockeyprincess/google-api-dfp-python | 534519695ffd26341204eedda7a8b50648f12ea9 | efa82a8d85cbdc90f030db9d168790c55bd8b12a | refs/heads/master | 2021-01-10T10:01:09.445419 | 2011-04-14T18:25:38 | 2011-04-14T18:25:38 | 52,676,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,585 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates the display name of each custom targeting value up to
the first 500. To determine which custom targeting keys exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
custom_targeting_service = client.GetCustomTargetingService(
'https://sandbox.google.com', 'v201103')
key_id = 'INSERT_CUSTOM_TARGETING_VALUE_ID_HERE'
values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
filter_statement = {'query': 'WHERE customTargetingKeyId = :keyId LIMIT 500',
'values': values}
# Get custom targeting values by statement.
values = custom_targeting_service.GetCustomTargetingValuesByStatement(
filter_statement)[0]['results']
# Update each local custom targeting value object by changing its display name.
if values:
for value in values:
if not value['displayName']:
value['displayName'] = value['name']
value['displayName'] += ' (Deprecated)'
values = custom_targeting_service.UpdateCustomTargetingValues(values)
# Display results.
if values:
for value in values:
print ('Custom targeting value with id \'%s\', name \'%s\', and display '
'name \'%s\' was updated.'
% (value['id'], value['name'], value['displayName']))
else:
print 'No custom targeting values were updated.'
else:
print 'No custom targeting values were found to update.'
| [
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] | api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138 |
55684fdb0228754c5b0c83f161cec75a2780a7c8 | d569476dd95496339c34b231621ff1f5dfd7fe49 | /PyTest/SteamSender/tests/PageObject/SteamActions.py | ef758dc5bfc9407871b3bb268f525393268eeabf | [] | no_license | monteua/Tests | 10f21f9bae027ce1763c73e2ea7edaf436140eae | 553e5f644466683046ea180422727ccb37967b98 | refs/heads/master | 2021-01-23T10:28:49.654273 | 2018-05-09T09:11:30 | 2018-05-09T09:11:30 | 93,061,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
class SteamHome(object):
def __init__(self, driver):
self.driver = driver
def open_browser(self):
self.driver.get("https://steamcommunity.com/login/home/?goto=")
def enter_credentials(self, login, password):
self.driver.find_element_by_id("steamAccountName").send_keys(login)
self.driver.find_element_by_id("steamPassword").send_keys(password, Keys.ENTER)
def pass_steam_guard(self):
WebDriverWait(self.driver, 20).until(EC.visibility_of(self.driver.find_element_by_id("blotter_statuspost_textarea")))
def open_trade_url(self):
self.driver.get("https://steamcommunity.com/tradeoffer/new/?partner=81735615&token=lhNyIIkQ")
time.sleep(2)
self.driver.execute_script("javascript: TradePageSelectInventory(UserYou, 753, 0);")
time.sleep(2)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(20)
def log_off(self):
self.driver.execute_script("javascript: Logout();") | [
"arximed.monte@gmail.com"
] | arximed.monte@gmail.com |
8e31ff5084ea5f8bf5777a184c508e97ccd22cac | 2da6133f3cd5c5fc19355292d60253b8c0dbcd49 | /.history/antz/models_20200403232602.py | e82e48acaad35fce9f5ead09f86c2ffb0b256759 | [] | no_license | mirfarzam/python-advance-jadi-maktabkhooneh | b24f5c03ab88e3b12c166a439b925af92f50de49 | d9bcecae73fd992f1290c6fd76761683bb512825 | refs/heads/master | 2021-05-25T21:33:37.782734 | 2020-04-07T22:39:28 | 2020-04-07T22:39:28 | 253,927,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from django.db import models
class CardBrand(models.Model):
name = models.charField(max_length=128)
class CarModel(models.Model):
name = models.charField(max_length=128)
brand = models.ForeignKey(CarBrand, on_delete = models.CASCADE)
class Car(models.Model):
name = models.charField(max_length=255)
brand = models.ForeignKey(CarBrand, on_delete = models.CASCADE)
model = models.ForeignKey(CarModel, on_delete = models.CASCADE)
price = models.Integer | [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
4105ca931275e881465db1fab1190e4b1ed38288 | 3e5150447a2c90c26354500f1df9660ef35c990b | /filesystem/delete.py | 9dc80d9ebca76b9e647f7e8d1bf7799d70ed8c7c | [] | no_license | kilirobbs/python-fiddle | 8d6417ebff9d6530e713b6724f8416da86c24c65 | 9c2f320bd2391433288cd4971c2993f1dd5ff464 | refs/heads/master | 2016-09-11T03:56:39.808358 | 2013-03-19T19:26:19 | 2013-03-19T19:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | from os import remove
from shutil import rmtree
#remove(filename)
folder="/Users/nordmenss/git/GISTS/4050474/.git/hooks"
rmtree(folder) | [
"cancerhermit@gmail.com"
] | cancerhermit@gmail.com |
82e3ddab0c74beb14332a162cfc09b3f38772cca | d5c6af12520a0f125c3f12b5f4da8a47492b7dc0 | /mathematics/find_the_point.py | e028c70e5952ce5807f60ee56c50267d9cd9b977 | [] | no_license | LubosKolouch/hackerrank | 4e0a2a5ff1309152c6515732f408ee1434712bff | 21de03b0638277108c250c2971fbd3e5b69cf454 | refs/heads/master | 2022-12-24T06:42:13.411394 | 2020-10-04T14:52:04 | 2020-10-04T14:52:04 | 266,756,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | """ https://www.hackerrank.com/challenges/find-point/ """
def findPoint(px, py, qx, qy):
#
# Write your code here.
#
return 2*qx-px, 2*qy-py
| [
"lubos@kolouch.net"
] | lubos@kolouch.net |
6fcd455d0ae546f2a8441ff6cdb63c295ed32199 | 399dae0b5ad9ca27cde175d25b5435958674eb50 | /System/Run PowerShell Script/run-powershell-script.py | 276874c0fa446e1832bd4e3f0f779886e8c565e6 | [] | no_license | kannanch/pythonscripts | 61e3ea9e8ebf6a6b0ec2a4a829664e4507b803ba | 843a522236f9c2cc2aadc68d504c71bb72600bd9 | refs/heads/master | 2020-06-12T11:18:00.404673 | 2019-06-28T11:24:37 | 2019-06-28T11:24:37 | 194,282,297 | 1 | 0 | null | 2019-06-28T13:55:56 | 2019-06-28T13:55:56 | null | UTF-8 | Python | false | false | 1,655 | py | ps_content=r'''
function Get-Uptime {
Param(
$ComputerName = $env:COMPUTERNAME)
if ($lastBootUpTime = (Get-WmiObject win32_operatingsystem -ComputerName $ComputerName| select @{LABEL='LastBootUpTime';EXPRESSION={$_.ConverttoDateTime($_.lastbootuptime)}}).LastBootUpTime) {
(Get-Date) - $lastBootUpTime
} else {
Write-Error "Unable to retrieve WMI Object win32_operatingsystem from $ComputerName"}}
Get-Uptime
'''
import os
def ecmd(command):
import ctypes
from subprocess import PIPE, Popen
class disable_file_system_redirection:
_disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
_revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self._disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self._revert(self.old_value)
with disable_file_system_redirection():
obj = Popen(command, shell = True, stdout = PIPE, stderr = PIPE)
out, err = obj.communicate()
ret=obj.returncode
if ret==0:
if out:
return out.strip()
else:
return ret
else:
if err:
return err.strip()
else:
return ret
file_name='powershell_file.ps1'
file_path=os.path.join(os.environ['TEMP'], file_name)
with open(file_path, 'wb') as wr:
wr.write(ps_content)
ecmd('powershell "Set-ExecutionPolicy RemoteSigned"')
print ecmd('powershell "%s"'%file_path)
os.remove(file_path)
| [
"noreply@github.com"
] | kannanch.noreply@github.com |
0964e9a5dbb4533d4abfd265dd6d8ba7144e2691 | 48323c491536a1190f6287161f61230eb0232dfe | /Leetcode/NextPermutation.py | abb8d7755bb53bab4b5faa5ac26972801f8ed397 | [] | no_license | cgxabc/Online-Judge-Programming-Exercise | d8249846eaf1a7f6f228aeae5adcee6d90dfcce6 | 724f514e7dc7774f2df5eecf90ef2a678b233a29 | refs/heads/master | 2021-04-28T13:27:42.239709 | 2018-02-19T18:51:05 | 2018-02-19T18:51:05 | 122,104,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 17 15:05:43 2017
@author: apple
"""
"""
Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).
The replacement must be in-place, do not allocate extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.
1,2,3 → 1,3,2
3,2,1 → 1,2,3
1,1,5 → 1,5,1
"""
def nextPermutation(nums):
k,l=-1,0
for i in xrange(len(nums)-1):
if nums[i]<nums[i+1]:
k=i
if k==-1:
nums.reverse()
return
for i in xrange(k+1, len(nums)):
if nums[i]>nums[k]:
l=i
nums[k], nums[l]=nums[l], nums[k]
nums[k+1:]=nums[:k:-1]
return nums
print nextPermutation([1,3,4,19,100,67,21,5])
#[1,3,4,21,100,67,19,5]
#[1,3,4,21,5,19,67,100]
#[1, 3, 4, 21, 5, 19, 67, 100]
#def combinationSum(candidates,target):
#nums=[1,3,4,19,8,6,21,5]
#k,l=-1,0
#for i in xrange(len(nums)-1):
# if nums[i]<nums[i+1]:
# k=i
#for i in xrange(k+1, len(nums)):
# if nums[i]>nums[k]:
# l=i
#print k, l #5,6
#[1,3,4,19,8,21,6,5]
#[1,3,4,19,8,21,5,6]
| [
"noreply@github.com"
] | cgxabc.noreply@github.com |
f585be332ad48617e79e279f22f8d8d6ec1fe263 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/90ec77532048b6bf469559849d5ca80cd4e54f69action_controller.py | 7d45544d8058f62ec577713a798563d315ed7df2 | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 2,067 | py | from Menu.menu import Menu
from Menu.text_menu_entry import TextMenuEntry
from Screen.Console.Menu.ActionMenu.action_menu_screen import ActionMenuScreen
from Screen.Console.Menu.ActionMenu.AttackMenu.attack_controller import AttackController
from Screen.Console.Menu.ActionMenu.SwitchMenu.switch_controller import SwitchController
from kao_console.ascii import ENDL, KAO_UP, KAO_DOWN, KAO_LEFT, KAO_RIGHT
from kao_gui.console.console_controller import ConsoleController
class ActionController(ConsoleController):
""" Controller for selecting a Battle Action """
def __init__(self, pokemon, battle):
""" Builds the Action Controller """
self.pokemon = pokemon
self.battle = battle
self.action = None
entries = [TextMenuEntry("Fight", self.chooseAttack),
TextMenuEntry("Switch", self.switch),
TextMenuEntry("Item", None),
TextMenuEntry("Run", None)]
self.menu = Menu(entries, columns=2)
screen = ActionMenuScreen(self.menu, battle)
cmds = {ENDL:self.menu.enter,
KAO_UP:self.menu.up,
KAO_DOWN:self.menu.down,
KAO_RIGHT:self.menu.right,
KAO_LEFT:self.menu.left}
ConsoleController.__init__(self, screen, commands=cmds)
def chooseAttack(self, entry):
""" Run the Attack Menu Controller """
attackMenuController = AttackController(self.pokemon, self.battle.oppSide.pkmnInPlay, self.battle)
self.runController(attackMenuController)
def switch(self, entry):
""" Run the Switch Menu Controller """
switchMenuController = SwitchController(self.pokemon)
self.runController(switchMenuController)
def runController(self, controller):
""" Runs the given controller """
ConsoleController.runController(self, controller)
if controller.action is not None:
self.action = controller.action
self.stopRunning() | [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
02893c861cf938bbf3afe51a1e5d61e2b8f327bd | 6ae8717002f8fce4457cceb3375a114ddcb837df | /1-100/18. Four Sum.py | ce1a72a2e443da34b890db6074991804027e0403 | [] | no_license | SunnyMarkLiu/LeetCode | 31aea2954d5a84d11a1c4435f760c1d03c6c1243 | 852fad258f5070c7b93c35252f7404e85e709ea6 | refs/heads/master | 2020-05-30T07:17:33.992197 | 2018-03-29T03:57:51 | 2018-03-29T03:57:51 | 104,643,862 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | #!/home/sunnymarkliu/software/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
4 sum 问题转化为 3 sum 问题
同时注意时间复杂度而进行的边界检查
@author: MarkLiu
@time : 17-10-31 下午8:05
"""
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
if len(nums) < 3:
return []
solutions = []
nums.sort() # 注意先排序
max_num = nums[-1]
min_num = nums[0]
if 4 * min_num > target or 4 * max_num < target: # 最大值最小值的边间检测
return []
for i in xrange(len(nums)):
if i > 0 and nums[i] == nums[i - 1]: # 去除重复数据
continue
if nums[i] + 3 * max_num < target: # nums[i] 太小了
continue
if nums[i] + 3 * min_num > target: # nums[i] 太大了
break
tmp_target = target - nums[i]
if i == 0:
tmp_nums = nums[1:]
elif i == len(nums) - 1:
tmp_nums = nums[:-1]
else:
tmp_nums = nums[:i]
tmp_nums.extend(nums[i + 1:])
# three sum problem
for j in xrange(len(tmp_nums) - 2):
if j > 0 and tmp_nums[j] == tmp_nums[j - 1]: # 去除重复数据
continue
l_index, r_index = j + 1, len(tmp_nums) - 1
while l_index < r_index:
s = tmp_nums[j] + tmp_nums[l_index] + tmp_nums[r_index]
if s < tmp_target:
l_index += 1
elif s > tmp_target:
r_index -= 1
else:
s = [nums[i], tmp_nums[j], tmp_nums[l_index], tmp_nums[r_index]]
s.sort()
if s not in solutions:
solutions.append(s)
while l_index < r_index and tmp_nums[l_index] == tmp_nums[l_index + 1]: # 去除重复数据
l_index += 1
while l_index < r_index and tmp_nums[r_index] == tmp_nums[r_index - 1]:
r_index -= 1
l_index += 1
r_index -= 1
return solutions
print Solution().fourSum([1, 0, -1, 0, -2, 2], 0)
| [
"SunnyMarkLiu101@gmail.com"
] | SunnyMarkLiu101@gmail.com |
c4b24cf35d4b377de870d3648ce56e6d70ebe71b | 79ad16a56df93085651886375920306e63121690 | /docs_src/tutorial/fastapi/limit_and_offset/tutorial001.py | 9bdf60446a6f961ddc89296af3342889726b0341 | [
"MIT"
] | permissive | macrosfirst/sqlmodel | 4286f72144afbf1476368e3fd0ca895852799046 | bda2e2818a3e7c2a18be4adf55bfea9bad83bfcc | refs/heads/main | 2023-08-14T02:09:27.072625 | 2021-09-29T13:31:54 | 2021-09-29T13:31:54 | 403,592,064 | 0 | 0 | MIT | 2021-09-29T13:31:55 | 2021-09-06T11:11:59 | Python | UTF-8 | Python | false | false | 1,599 | py | from typing import List, Optional
from fastapi import FastAPI, HTTPException, Query
from sqlmodel import Field, Session, SQLModel, create_engine, select
class HeroBase(SQLModel):
name: str
secret_name: str
age: Optional[int] = None
class Hero(HeroBase, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
class HeroCreate(HeroBase):
pass
class HeroRead(HeroBase):
id: int
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroRead)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.from_orm(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroRead])
def read_heroes(offset: int = 0, limit: int = Query(default=100, lte=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroRead)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
| [
"tiangolo@gmail.com"
] | tiangolo@gmail.com |
0396719e3079adfc788b7ebb764fa5b83d5d56c3 | b2ab2caae8d6a24dfb6e43852ed5fd416d912dad | /flask/day02flask没有api/utils/ch_login.py | ec62393c203afbf7310991b009cf93b18b05e567 | [] | no_license | cheyunping77/learingnote | 781d55eb2e951049840e58ee41af3de8490fd37e | edba6c247eefe33829ba549068d67dcb288ea28b | refs/heads/master | 2023-08-18T12:57:45.624973 | 2023-07-24T06:47:11 | 2023-07-24T06:47:11 | 256,951,310 | 0 | 0 | null | 2020-04-19T08:35:17 | 2020-04-19T08:35:16 | null | UTF-8 | Python | false | false | 271 | py | from flask import url_for,redirect,session
def is_login(func):
def check_login():
user_session = session.get('user_id')
if user_session:
return func
else:
return redirect(url_for('user.login'))
return check_login | [
"380604322@qq.com"
] | 380604322@qq.com |
26fe6bcdf3b7a3144632734a6041c873371cfccb | c1db68ab2abc9c03a733e8de00eca134fe987a67 | /req2.py | 2fee3dd944dd30b7e0d8e08b7533c7b499cbfcb6 | [] | no_license | pooja-pichad/request | f64560c33d6b3f131ab25274f4c7ebc0c88c866e | bf9613b5b23f3df0a15e3367d3f100840ccef23f | refs/heads/main | 2023-08-21T09:35:50.905226 | 2021-10-14T09:06:28 | 2021-10-14T09:06:28 | 387,688,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,339 | py | import requests
import json
# calling a saral api
def saral():
saral_api = " http://saral.navgurukul.org/api/courses" #This link is a API and url
saral_url = requests.get(saral_api) #request is a server
# convert into a json
data = saral_url.json()
with open ("meraki_courses.json","w") as saral_data:
json.dump(data,saral_data,indent = 4)
# Here it was title welcome to navgurukul
print("")
print("* Welcome to navgurukul and Learn basic programming launguage *")
print("")
# And then find the cource name all.....
def parent():
serial_no = 0
for i in data["availableCourses"]:
print(serial_no+1 ,i["name"], i["id"])
serial_no=serial_no+1
parent()
print("")
user_input =int(input("Enter your courses number that you want to learn:- "))
parent_id=data["availableCourses"][user_input-1]["id"]
print(data["availableCourses"][user_input-1]["name"])
print("")
print("* Welcome to navgurukul and Learn basic programming launguage *")
print("")
# And then taking userinput in previous or next .... previous then it will be print all courses name next then it will be print parents...
user_input_1=input("if you want next or previous n/p: ")
if user_input_1=="p":
i=0
while i<len(data["availableCourses"]):
Courses = (data["availableCourses"][i]["name"])
print(i+1," ",Courses,data["availableCourses"][i]["id"])
i=i+1
user_input = int(input("Enter your courses number that you want to learn:-"))
print(data["availableCourses"][user_input-1]["name"])
# calling a parents api
parent_api = "http://saral.navgurukul.org/api/courses/"+str(data["availableCourses"][user_input-1]["id"])+"/exercises"
parent_url = requests.get(parent_api)
# parents api convert into a json
data_1 = parent_url.json()
# pusing a parents data into a json file
with open ("data.json","w") as child_data:
json.dump(data_1,child_data,indent=4)
def data():
serial_no_1=0
for i in data_1["data"]:
print(" ",serial_no_1+1,".",i["name"])
if len(i["childExercises"])>0:
s= 0
for j in i['childExercises']:
s = s+ 1
print( " ",s,j['name'])
else:
print(" 1",i["slug"])
serial_no_1+=1
data()
print("")
topic_no = int(input("Enter topic number that's you want to learn:- "))
serial_no_3= 0
my_list=[]
for l in data_1['data']:
serial_no_3+=1
if topic_no == serial_no_3:
user_input_3=input("Enter topic number that's you want to learn previous or next:- ")
if user_input_3=="p":
serial_no_1=0
for i in data_1["data"]:
print(" ",serial_no_1+1,".",i["name"])
if len(i["childExercises"])>0:
s= 0
for j in i['childExercises']:
s = s+ 1
print( " ",s,j['name'])
else:
print(" 1",i["slug"])
serial_no_1+=1
topic_no = int(input("Enter topic number that's you want to learn:- "))
m = 0
while m < len(data_1["data"][topic_no-1]["childExercises"]):
print(" ", m+1 ,data_1["data"][topic_no-1]["childExercises"][m]["name"])
slug = (data_1["data"][topic_no-1]["childExercises"][m]["slug"])
# calling a child exercise
child_exercises_url = ("http://saral.navgurukul.org/api/courses/" + str(parent_id) +"/exercise/getBySlug?slug=" + slug )
Data_3 = requests.get(child_exercises_url)
# converting data into a json file
convert_data = Data_3.json()
with open("content.json","w") as f:
json.dump(convert_data,f,indent=4)
my_list.append(convert_data["content"])
m = m + 1
# And then taking a user input in a choose the questions....
def Question():
questions_no = int(input("choose the specific questions no :- "))
question=questions_no-1
print(my_list[question])
while questions_no > 0 :
# Here a taking user input in a previous or next
next_question = input("do you next question or previous question n/p :- ")
if questions_no == len(my_list):
print("next page")
if next_question == "p" :
if questions_no == 1:
print("no more questions")
break
elif questions_no > 0:
questions_no = questions_no - 2
print(my_list[questions_no])
elif next_question == "n":
if questions_no < len(my_list):
index = questions_no + 1
print(my_list[index-1])
question = question + 1
questions_no = questions_no + 1
if question == (len(my_list)-1) :
print("next page")
break
Question()
saral() | [
"noreply@github.com"
] | pooja-pichad.noreply@github.com |
ccbc96dcb76deb5429a028de11d8bc19bc885cb9 | d9fb6c246965cbf290186268298859ddb913ee6e | /190930/test.py | 520808d2c39e67f84e60d7b5df8b7b7ab4bd31bd | [] | no_license | 91hongppie/algorithm | 1ca6d54de6eab252c708bf83835ace8a109d73fc | 4c2fa8178e0ef7afbf0b736387f05cbada72f95d | refs/heads/master | 2020-07-20T22:17:40.700366 | 2020-06-29T00:06:11 | 2020-06-29T00:06:11 | 206,717,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | import sys
sys.stdin = open('test.txt', 'r')
# def test(c, rc):
# global result, nums
# if rc not in result:
# result.append(rc)
# if c == nums:
# return
# test(c+1, rc+scores[c])
# test(c+1, rc)
N = int(input())
for tc in range(1, N+1):
nums = int(input())
scores = list(map(int, input().split()))
result = [0]*(sum(scores)+1)
result[0] = 1
u = 0
for i in range(len(scores)):
for j in range(sum(scores), -1, -1):
u += 1
if result[j]:
result[scores[i]+j] = 1
a = result.count(1)
print(u)
print('#{} {}'.format(tc, a))
| [
"91hongppie@gmail.com"
] | 91hongppie@gmail.com |
20245bfb78d653c8237477beb7e295e82abc8728 | cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be | /python3/sql/getloc.py | e7364928c50915bd728d4d97890c8e206995c6dd | [] | no_license | ericosur/ericosur-snippet | dda2200546b13fb9b84632d115a0f4ca5e3d5c47 | 0309eeb614612f9a35843e2f45f4080ae03eaa81 | refs/heads/main | 2023-08-08T04:54:05.907435 | 2023-07-25T06:04:01 | 2023-07-25T06:04:01 | 23,057,196 | 2 | 1 | null | 2022-08-31T09:55:19 | 2014-08-18T03:18:52 | Perl | UTF-8 | Python | false | false | 1,216 | py | #!/usr/bin/python3
# coding: utf-8
'''
query players.db to find location coordinates
'''
import sqlite3
import sys
try:
from hexdump import hexdump
except ImportError:
print("need install module: hexdump")
sys.exit(1)
class Solution():
''' sqlite and query '''
def __init__(self):
self.dbfile = 'players.db'
self.con = sqlite3.connect(self.dbfile)
@staticmethod
def show_blob(buffer):
''' show blob '''
print(type(buffer))
print(len(buffer))
hexdump(buffer)
def query_blob(self):
''' query blob '''
res = self.con.execute("SELECT data FROM localPlayers")
blob = res.fetchone()
self.show_blob(blob[0])
def query_xy(self):
''' query x, y location '''
res = self.con.execute("SELECT name,x,y FROM localPlayers")
(name, x, y) = res.fetchone()
print(f'player name: {name}')
x = int(x)
y = int(y)
print(f'https://map.projectzomboid.com/#{x}x{y}')
def run(self):
''' run '''
self.query_xy()
#self.query_blob()
def main():
''' main '''
sol = Solution()
sol.run()
if __name__ == '__main__':
main()
| [
"ericosur@gmail.com"
] | ericosur@gmail.com |
f67179075a44b0e73699d2357779d53d1c60decb | 868ac4e558cf5fe945e8b557564f34f79b3ad01e | /purity_fb/purity_fb_1dot10/models/directory_service_response.py | 2fc1bc7144e633dfc9badfed480f1a506388d3ff | [
"Apache-2.0"
] | permissive | mabdelhafez/purity_fb_python_client | f4253ce8497fb3cff648e0a0cd1e567f48129fa7 | a9856875b3df43b4302a2e4addd1a6b71f51f5ce | refs/heads/master | 2022-04-20T09:24:22.031408 | 2020-04-20T22:11:32 | 2020-04-20T22:15:44 | 257,372,596 | 0 | 0 | NOASSERTION | 2020-04-20T18:40:24 | 2020-04-20T18:40:23 | null | UTF-8 | Python | false | false | 4,297 | py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.10 Python SDK
Pure Storage FlashBlade REST 1.10 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.10
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DirectoryServiceResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # swagger_types: attribute name -> declared swagger type.
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[DirectoryService]'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):
        """Build a DirectoryServiceResponse; both attributes are optional."""
        self._pagination_info = None
        self._items = None
        # Route non-None arguments through the property setters.
        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Pagination information, only available in GET requests.

        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Set the pagination information for this response."""
        self._pagination_info = pagination_info

    @property
    def items(self):
        """A list of directory service objects.

        :rtype: list[DirectoryService]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Set the list of directory service objects."""
        self._items = items

    def to_dict(self):
        """Return the model's properties as a plain dictionary."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialise any model elements in the list.
                converted = []
                for element in value:
                    if hasattr(element, "to_dict"):
                        converted.append(element.to_dict())
                    else:
                        converted.append(element)
                result[attr] = converted
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialise any model values in the dict.
                converted = {}
                for key, element in value.items():
                    if hasattr(element, "to_dict"):
                        converted[key] = element.to_dict()
                    else:
                        converted[key] = element
                result[attr] = converted
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two responses are equal when type and attribute values match."""
        if not isinstance(other, DirectoryServiceResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"mabdelhafez@purestorage.com"
] | mabdelhafez@purestorage.com |
0d2788ab9aafd86f7d209b60a0b3697107636ab2 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/102/usersdata/212/49943/submittedfiles/av1_2.py | a70d5686bf818087b807f4b0822fd2efe3e0d999 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8 -*-
import math  # NOTE(review): imported but never used in this script

# Read the four values of the sequence, one per prompt (Portuguese prompts).
n1=int(input('digite o primeiro valor da sequência:'))
n2=int(input('digite o segundo valor da sequência:'))
n3=int(input('digite o terceiro valor da sequência:'))
n4=int(input('digite o quarto valor da sequência:'))
# Print 'V' when value 1 equals value 3 or value 2 equals value 4, else 'N'.
if (n1==n3) or (n2==n4):
    print('V')
else:
    print('N')
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7f8d73f6f3bba6793aeb102ca4f67544ac169569 | ec6cb8542c8ed962d24ca32fc1f060ef63fdfea7 | /第一阶段/课上实例练习/13继承_重构_repr_/epidemic_information_system_v1.py | a8e759adedeb2f5fb91ff3d0795a5b9b4f5debde | [] | no_license | singerdo/songers | 27859a4ff704318d149b2aa6613add407d88bb5d | 9c5dcd80c6772272c933b06c156b33058cbd3ce4 | refs/heads/master | 2022-04-16T11:00:11.002138 | 2020-04-18T07:15:16 | 2020-04-18T07:15:16 | 256,686,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,688 | py | """
疫情信息系统
需求:
输入1,循环获取疫情信息.
练习1:
需求:
输入2,显示所有疫情信息.
步骤:
在View中判断是否输入"2"
在Controller中定义__list_epidemics的只读属性
在View中显示信息
"""
class EpidemicInformationModel:
    """Plain data holder for one region's epidemic record (疫情信息模型)."""

    def __init__(self, region="", confirmed=0, dead=0, cure=0, eid=0):
        # region: area name; confirmed/dead/cure: case counters;
        # eid: unique record id, normally stamped by the controller.
        self.region, self.confirmed = region, confirmed
        self.dead, self.cure = dead, cure
        self.eid = eid
class EpidemicInformationView:
    """
    疫情信息界面视图:负责处理界面逻辑
    (View layer: owns all console interaction; data operations are delegated
    to EpidemicInformationController.)
    """

    def __init__(self):
        # The view never touches the record list directly.
        self.__controller = EpidemicInformationController()

    def __show_menu(self):
        # Print the four menu options.
        # Fix: the original wrapped each print in a `while True` header that
        # was missing its colon (a SyntaxError); the try/except around a
        # plain print could never trigger, so simple prints are equivalent.
        print("输入1键盘录入疫情信息")
        print("输入2键盘显示疫情信息")
        print("输入3键盘查找疫情信息")
        print("输入4键盘删除疫情信息")

    def __select_menu(self):
        # Dispatch one menu choice; unknown input just redisplays the menu.
        item = input("请输入选项:")
        if item == "1":
            self.__input_epidemics()
        elif item == "2":
            self.__print_epidemics()
        elif item == "3":
            self.__select_epidemic()
        elif item == "4":
            self.__delete_epidemic()

    def main(self):
        """Run the interactive menu loop forever."""
        while True:
            self.__show_menu()
            self.__select_menu()

    def __input_epidemics(self):
        # Read records until the user enters an empty region name.
        while True:
            region = input("请输入地区,如需退出输入空字符:")
            # Fix: the prompt promises that an empty string exits, but the
            # original never checked it, so this loop could not be left.
            if not region:
                break
            model = EpidemicInformationModel(region)
            model.confirmed = int(input("请输入确诊人数:"))
            model.dead = int(input("请输入死亡人数:"))
            model.cure = int(input("请输入治愈人数:"))
            # 存储当前数据
            self.__controller.add_epidemic(model)

    def __print_epidemics(self):
        # Show every stored record.
        for info in self.__controller.list_epidemics:
            self.__print_epidemic(info)

    def __print_epidemic(self, info):
        # Render one record on a single line.
        print("%s的确诊人数%d,死亡人数%d,治愈人数%d" % (info.region, info.confirmed, info.dead, info.cure))

    def __select_epidemic(self):
        # Look up a record by region name.
        region = input("请输入地区:")
        epidemic = self.__controller.get_epidemic_by_region(region)
        if epidemic:
            self.__print_epidemic(epidemic)
        else:
            print("您输入的地区没有疫情")

    def __delete_epidemic(self):
        # Delete a record by its numeric id.
        eid = int(input("请输入需要删除的疫情信息编号:"))
        if self.__controller.remove_epidemic_by_id(eid):
            print("删除成功")
        else:
            print("删除失败")
class EpidemicInformationController:
    """Business-logic controller holding the in-memory epidemic records.

    疫情信息逻辑控制器:负责处理业务逻辑
    """

    def __init__(self):
        self.__list_epidemics = []   # stored records, in insertion order
        self.__eid_begin = 1000      # next unique id to hand out

    @property
    def list_epidemics(self):
        """Read-only access to all stored records."""
        return self.__list_epidemics

    def add_epidemic(self, info):
        """Store *info*, stamping it with the next unique id.

        :param info: record to add; its ``eid`` attribute is overwritten.
        """
        info.eid = self.__eid_begin
        self.__eid_begin += 1
        self.__list_epidemics.append(info)

    def get_epidemic_by_region(self, region):
        """Return the first record whose region equals *region*, else None."""
        for record in self.__list_epidemics:
            if record.region == region:
                return record
        return None

    def remove_epidemic_by_id(self, eid):
        """Delete the record whose id is *eid*.

        :return: True when a record was removed, False otherwise.
        """
        for index, record in enumerate(self.__list_epidemics):
            if record.eid == eid:
                del self.__list_epidemics[index]
                return True
        return False
return False
# 入口
view = EpidemicInformationView()
view.main()
#每个都输入吗 | [
"569593546@qq.com"
] | 569593546@qq.com |
962ecb9cd49d0303277c9be28f8389ecfc558290 | 9f0a4262c4402201df1cdd5674a679543f4a50b5 | /studio_maya/resources/__init__.py | b007269247729602677317a32043f9797e916de2 | [] | no_license | subing85/subins-toolkits | 611b6b3b3012ccb023096f6e21d18d2bda5a534b | d02af1289ec3ee5bce6fa3d78c134a8847113aa6 | refs/heads/master | 2022-07-12T17:19:57.411454 | 2022-07-01T20:37:16 | 2022-07-01T20:37:16 | 168,826,548 | 11 | 2 | null | 2022-07-02T01:03:34 | 2019-02-02T11:51:25 | Mathematica | UTF-8 | Python | false | false | 2,342 | py | import os
import tempfile
import platform
CURRENT_PATH = os.path.dirname(__file__)
def getToolKit():
    # (display name, short code, version) identifying this toolkit.
    return "Studio Maya Interpreter", "smi", "0.0.1"
def getModuleName():
    # Package name; also used as the workspace sub-folder name below.
    return "studio_maya"
def getIconPath():
    # Absolute path of the bundled "icons" directory.
    return os.path.join(CURRENT_PATH, "icons")
def getLogo():
    # Absolute path of the toolkit logo image.
    return os.path.join(CURRENT_PATH, "icons", "logo.png")
def getWorkspacePath():
    """Return the per-user workspace directory for this toolkit.

    Uses ``USERPROFILE`` on Windows and ``HOME`` everywhere else. The
    original duplicated the whole path expression per OS and silently
    returned ``None`` on any platform other than Windows/Linux (e.g.
    macOS); Windows and Linux results are unchanged.
    """
    if platform.system() == "Windows":
        home = os.getenv("USERPROFILE")
    else:
        home = os.getenv("HOME")
    return os.path.join(
        home,
        "Documents",
        "studio_toolkits",
        getModuleName(),
    )
def getPreferenceFile():
    # Path of the user's config.xml, creating <workspace>/preference on demand.
    preference_path = os.path.join(getWorkspacePath(), "preference")
    if not os.path.isdir(preference_path):
        os.makedirs(preference_path)
    return os.path.join(preference_path, "config.xml")
def getOperatingSystem():
    # Thin wrapper over platform.system(): "Windows", "Linux" or "Darwin".
    return platform.system()
def getRootPath():
    # (filesystem root, Maya install sub-path, mayapy executable name) per OS.
    # NOTE(review): returns None on macOS ("Darwin") -- confirm intended.
    operating_system = getOperatingSystem()
    if operating_system == "Windows":
        return "C:/", "Autodesk/Maya", "mayapy.exe"
    if operating_system == "Linux":
        return "/", "autodesk/maya", "mayapy"
def getEditor():
    # Shell command used to open an external text editor per OS.
    # NOTE(review): returns None on macOS ("Darwin") -- confirm intended.
    operating_system = getOperatingSystem()
    if operating_system == "Windows":
        return "start wordpad"
    if operating_system == "Linux":
        return "kwrite"
def getFormats():
    """Map file-type categories to the extensions they cover."""
    return {
        "maya": [".ma", ".mb"],
        "code": [".py", ".pyc", ".mel"],
    }
def getImages():
    """Map file extensions to the icon image name used for them."""
    return {
        ".ma": "maya_ascii",
        ".mb": "maya_binary",
        ".py": "python",
        ".pyc": "Python_compile",
        ".mel": "mel",
    }
def getTempCodeFile():
    # Scratch file used to hand code snippets to the interpreter.
    return os.path.join(tempfile.gettempdir(), "studio_maya_temp.py")
def getInputPath():
    # Bundled "inputs" directory, normalised to forward slashes.
    # NOTE(review): getScriptPath does not normalise separators -- confirm
    # which convention callers expect.
    path = os.path.join(CURRENT_PATH, "inputs").replace("\\", "/")
    return path
def getScriptPath():
    # Bundled "scripts" directory (native path separators).
    path = os.path.join(CURRENT_PATH, "scripts")
    return path
def getToolKitLink():
    # Home page of the toolkit.
    return "https://www.subins-toolkits.com"
def getToolKitHelpLink():
    # Online help page for this toolkit.
    return "https://www.subins-toolkits.com/studio-maya"
def getDownloadLink():
    # Download page for this toolkit (currently same as the help page).
    return "https://www.subins-toolkits.com/studio-maya"
| [
"subing85@gmail.com"
] | subing85@gmail.com |
eeab448cdc59c08d59f662483396beaf151639e5 | 9268f5f8ccbc91322eb12c5cc0be53e7678aeff7 | /docs/source/pgf2img.py | 0501d4a6cda356d0178a518747924bd1e7c344d8 | [
"BSD-3-Clause"
] | permissive | JupiterEthan/ted.python | 7f5e462a064b351d0520d73a3972be151979be23 | 1698a7f792db23123003ae4e2d39b4c18f25f347 | refs/heads/master | 2020-05-29T09:14:55.478502 | 2015-12-06T15:30:01 | 2015-12-06T15:30:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,884 | py | #!/usr/bin/env python
"""
Convert PGF files to an image file using pdflatex, pdfcrop, (from
texlive) and convert (from ImageMagick).
"""
import os
import sys
import tempfile
import time
import subprocess
def __find_exec(executable):
    '''Try to find an executable in the system path.'''
    # A path that already exists is returned unchanged; otherwise each
    # PATH entry is probed in order.  An empty string means "not found".
    if os.path.isfile(executable):
        return executable
    else:
        paths = os.environ['PATH'].split(os.pathsep)
        for p in paths:
            f = os.path.join(p, executable)
            if os.path.isfile(f):
                return f
        return ''
def __check_for_exec(executable, msg):
    '''Exit on error if the specified executable cannot be
    found. Otherwise, return the path to the executable.'''
    # NOTE: this module uses Python 2 print statements throughout.
    path = __find_exec(executable)
    if path == '':
        print msg
        sys.exit()
    else:
        return path
def __run_cmd(cmd, msg, cwd=None, wait=30):
    '''Run a system command; display an error message if it returns a
    nonzero exit code or it stalls for more than the specified number
    of seconds.'''
    # Output is discarded; the command is run through the shell.
    dev_null = open('/dev/null', 'w')
    p = subprocess.Popen(cmd, stdout=dev_null, stderr=dev_null, shell=True, cwd=cwd)
    tic = time.time()
    # Busy-wait (no sleep) until the process exits or the timeout expires;
    # Ctrl-C kills the child before exiting.
    while p.returncode == None and time.time() < tic+wait:
        try:
            p.poll()
        except KeyboardInterrupt:
            print 'manually killing command %s' % cmd
            p.kill()
            sys.exit(1)
    if p.returncode == None:
        # Timed out without finishing: kill the stalled child.
        print 'killing stalled command %s ' % cmd
        p.kill()
    if p.returncode < 0:
        # Negative return code means the child died from a signal.
        print msg
        sys.exit(1)
# Check for required executables (exits at import time when any is missing):
PDFLATEX = __check_for_exec('pdflatex', 'cannot find pdflatex')
PDFCROP = __check_for_exec('pdfcrop', 'cannot find pdfcrop')
CONVERT = __check_for_exec('convert', 'cannot find convert')
RM = __check_for_exec('rm', 'cannot find rm')
# Used to redirect program output to /dev/null:
redirect_output = ' 1>/dev/null 2>&1'
# Defaults:
# LaTeX wrapper document; the literal token <> is replaced with the
# PGF/TikZ input by pgf2img().
default_template = """\\documentclass[10pt]{article}
\\usepackage{amsmath,amssymb,amsbsy,amsfonts,amsthm}
\\usepackage[landscape]{geometry}
\\usepackage{cmbright}
\\usepackage{tikz}
\\pagestyle{empty}
\\begin{document}
<>
\\end{document}
"""
# Default raster density (DPI) passed to ImageMagick convert:
default_density = 200
def pgf2img(input_filename, output_filename,
            template=default_template, density=default_density):
    """Convert a PGF/TikZ file to an image file.

    Pipeline: wrap the input in the LaTeX template, compile with pdflatex,
    trim whitespace with pdfcrop, then (unless the target is .pdf) raster
    with ImageMagick convert.  All work happens in a temporary directory
    that is removed at the end.

    Parameters
    ----------
    input_filename : str
        Name of input PGF/TikZ file. The file must contain a
        tikzpicture environment.
    output_filename : str
        Name of output file. The image format is determined
        by the filename extension.
    template : str
        LaTeX template used to generate image.
    density : int
        Output image density (in DPI).
    """
    # Open the input file:
    try:
        input_file = open(input_filename, 'r')
    except IOError:
        print 'error opening input file %s' % input_filename
        sys.exit(1)
    else:
        input_data = ''.join(input_file.readlines())
    # Combine the template and input file:
    temp_data = template.replace('<>',input_data)
    # Write the output to a temporary LaTeX file:
    try:
        temp_dirname = tempfile.mkdtemp()+os.sep
    except IOError:
        print 'error creating temporary directory %s' % temp_dirname
        sys.exit(1)
    else:
        temp_latex_filename = temp_dirname + 'temp.tex'
        try:
            temp_latex_file = open(temp_latex_filename,'w')
        except IOError:
            print 'error opening temporary LaTeX file %s' % temp_latex_filename
            sys.exit(1)
        else:
            temp_latex_file.writelines(temp_data.splitlines(True))
            temp_latex_file.close()
    # Process the temporary file with pdflatex:
    __run_cmd(PDFLATEX + ' ' +
              temp_latex_filename,
              'error running pdflatex', temp_dirname)
    # Crop the file with pdfcrop:
    temp_latex_basename = os.path.splitext(temp_latex_filename)[0]
    temp_pdf_filename = temp_latex_basename + '.pdf'
    temp_pdf_cropped_filename = temp_latex_basename + '_cropped.pdf'
    __run_cmd(PDFCROP + ' ' +
              temp_pdf_filename + ' ' +
              temp_pdf_cropped_filename,
              'error running pdfcrop',temp_dirname)
    # If the specified output file format is pdf, there is no need to run
    # the generated file through convert:
    output_ext = os.path.splitext(output_filename)[1]
    if output_ext.lower() == '.pdf':
        os.rename(temp_pdf_cropped_filename, output_filename)
    else:
        __run_cmd(CONVERT + ' -density ' + str(density) + ' ' +
                  temp_pdf_cropped_filename + ' ' +
                  output_filename,
                  'error running convert')
    # Clean up the temporary work directory:
    __run_cmd(RM + ' -rf ' + temp_dirname,
              'error removing temporary directory %s' % temp_dirname)
| [
"lev@columbia.edu"
] | lev@columbia.edu |
d0a8cb4d9fa41a0759b2ffea4039c1e1f5186a15 | 19603633d723d3b824ca9bce2994ce7e63dd1fc9 | /tests/integration/test_copy.py | 8073bc43754023f14388f1e5890dd8f5cb9091fb | [
"Apache-2.0"
] | permissive | TingDaoK/s3transfer | 710d761cc7406ff477291b45a21105f870f68813 | 95f34d02275d716addb6fe2f8aa5327ceff98e3d | refs/heads/develop | 2023-04-20T14:18:25.590390 | 2020-12-18T22:12:01 | 2020-12-18T22:12:01 | 319,548,025 | 0 | 1 | Apache-2.0 | 2021-05-31T23:10:42 | 2020-12-08T06:32:04 | Python | UTF-8 | Python | false | false | 2,986 | py | # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import RecordingSubscriber
from tests.integration import BaseTransferManagerIntegTest
from s3transfer.manager import TransferConfig
class TestCopy(BaseTransferManagerIntegTest):
    def setUp(self):
        super(TestCopy, self).setUp()
        # 5 MiB threshold: a 1 MiB object copies with a single request,
        # while a 20 MiB object exercises the multipart path.
        self.multipart_threshold = 5 * 1024 * 1024
        self.config = TransferConfig(
            multipart_threshold=self.multipart_threshold)

    def test_copy_below_threshold(self):
        manager = self.create_transfer_manager(self.config)
        source_key = '1mb.txt'
        copied_key = '1mb-copy.txt'
        local_file = self.files.create_file_with_size(
            source_key, filesize=1024 * 1024)
        self.upload_file(local_file, source_key)

        copy_future = manager.copy(
            copy_source={'Bucket': self.bucket_name, 'Key': source_key},
            bucket=self.bucket_name,
            key=copied_key,
        )
        copy_future.result()
        self.assertTrue(self.object_exists(copied_key))

    def test_copy_above_threshold(self):
        manager = self.create_transfer_manager(self.config)
        source_key = '20mb.txt'
        copied_key = '20mb-copy.txt'
        local_file = self.files.create_file_with_size(
            source_key, filesize=20 * 1024 * 1024)
        self.upload_file(local_file, source_key)

        copy_future = manager.copy(
            copy_source={'Bucket': self.bucket_name, 'Key': source_key},
            bucket=self.bucket_name,
            key=copied_key,
        )
        copy_future.result()
        self.assertTrue(self.object_exists(copied_key))

    def test_progress_subscribers_on_copy(self):
        recorder = RecordingSubscriber()
        manager = self.create_transfer_manager(self.config)
        source_key = '20mb.txt'
        copied_key = '20mb-copy.txt'
        local_file = self.files.create_file_with_size(
            source_key, filesize=20 * 1024 * 1024)
        self.upload_file(local_file, source_key)

        copy_future = manager.copy(
            copy_source={'Bucket': self.bucket_name, 'Key': source_key},
            bucket=self.bucket_name,
            key=copied_key,
            subscribers=[recorder],
        )
        copy_future.result()
        # The progress callbacks must have reported exactly the total
        # number of bytes of the object that was copied.
        self.assertEqual(recorder.calculate_bytes_seen(), 20 * 1024 * 1024)
| [
"kyleknap@amazon.com"
] | kyleknap@amazon.com |
e7bc57d1a028c2d6796b49ebe2c1b947e021eb85 | 1ed536ef1527e6655217e731f622d643ece49c2b | /scripts/gpipe/pairs2gene_structure.py | 1b0134cfab7ba4d9bd647eed809068b2b6b31891 | [] | no_license | siping/cgat | de0f7af124eb38c72d7dece78fff83ff92ddbf96 | aa4cc85ffdc53998ea1a5ac5516df2d16c254d2e | refs/heads/master | 2021-01-22T13:03:18.060139 | 2013-10-07T15:53:55 | 2013-10-07T15:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,934 | py | ################################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#################################################################################
'''
gpipe/pairs2gene_structure.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python gpipe/pairs2gene_structure.py --help
Type::
python gpipe/pairs2gene_structure.py --help
for command line help.
Documentation
-------------
Code
----
'''
import os
import sys
import string
import re
import getopt
# Help text shown for -h/--help and on argument errors; %s is the script name.
USAGE="""python %s [OPTIONS] < assignments > pairs
Version: $Id: gpipe/pairs2gene_structure.py 1799 2008-03-28 11:44:19Z andreas $
Take a list of orthologous transcripts and write out a list
of orthologous transcripts.
Options:
-h, --help print this message.
-v, --verbose loglevel.
-g, --genome-file= pattern for filenames with the genomic DNA (FASTA).
-c, --cds= filename with coding sequences
-f, --format= output format, valid options are:
paired_fasta: concatenated pairwise alignments in FASTA format
""" % sys.argv[0]
import CGAT.Experiment as E
import CGAT.Genomics as Genomics
import CGAT.PredictionParser as PredictionParser
import alignlib
# Command-line option tables and their module-level defaults.
param_long_options=["verbose=", "help", "genome-file=", "format=",
                    "cds=", "version"]
param_short_options="v:hg:f:c:"

param_loglevel = 0

## pattern for genomes, %s is substituted for the sbjct_token
param_genome_file = "genome_%s.fasta"

## filename with cdss
# Fix: this default was declared as ``param_filename_cdss`` (trailing "s")
# while the main block reads ``param_filename_cds``, so running without -c
# raised a NameError instead of using the default.
param_filename_cds = "cds.fasta"

## output format
param_format = "paired_fasta"

## prefix/suffix for output files
param_filename_suffix = ".fasta"
param_filename_prefix = ""
##------------------------------------------------------------
# Python 2 script body: parse options, then for each prediction on stdin
# emit a pair of aligned CDS/genomic FASTA records.
if __name__ == '__main__':
    try:
        optlist, args = getopt.getopt(sys.argv[1:],
                                      param_short_options, param_long_options)
    except getopt.error, msg:
        print USAGE, msg
        sys.exit(2)
    for o,a in optlist:
        if o in ( "-v", "--verbose" ):
            param_loglevel = int(a)
        elif o in ( "-h", "--help" ):
            print USAGE
            sys.exit(0)
        elif o in ( "--version", ):
            print "version="
            sys.exit(0)
        elif o in ("-g", "--genome-file"):
            param_genome_file = a
        elif o in ("-c", "--cds"):
            param_filename_cds = a
    if len(args) > 0:
        print USAGE, "no arguments required."
        sys.exit(1)
    print E.GetHeader()
    print E.GetParams()
    ## reading CDS sequences
    if param_filename_cds:
        cds_sequences = Genomics.ReadPeptideSequences( open(param_filename_cds, "r") )
    else:
        cds_sequences = {}
    if param_loglevel >= 1:
        print "# read %i CDS sequences" % len(cds_sequences)
    # Genomes are loaded lazily and cached one at a time by filename.
    last_filename_genome = None
    p = PredictionParser.PredictionParserEntry()
    for line in sys.stdin:
        # Skip comment lines and quoted header lines.
        if line[0] == "#": continue
        if line[0] == '"': continue
        p.Read(line)
        ## read genomic sequence
        if "%s" in param_genome_file:
            filename_genome = param_genome_file % p.mSbjctToken
        else:
            filename_genome = param_genome_file
        if last_filename_genome != filename_genome:
            if param_loglevel >= 2:
                print "# reading genome %s" % filename_genome
            forward_sequences, reverse_sequences = Genomics.ReadGenomicSequences( open(filename_genome, "r"))
            last_filename_genome = filename_genome
        if p.mSbjctStrand == "+":
            genomic_sequence = forward_sequences[p.mSbjctToken]
        else:
            genomic_sequence = reverse_sequences[p.mSbjctToken]
        try:
            cds_fragment = cds_sequences[p.mQueryToken]
        except KeyError:
            print "# ERROR: cds not found: query %s." % p.mQueryToken
            continue
        genomic_fragment = genomic_sequence[p.mSbjctGenomeFrom:p.mSbjctGenomeTo]
        # NOTE: Python 2 string exceptions below; fatal in Python >= 2.6.
        if len(genomic_fragment) == 0:
            raise "ERROR: empty fragment %s:%s for line" % (p.mSbjctGenomeFrom, p.mSbjctGenomeTo), line
        # Translate the peptide->genome alignment into a cDNA alignment.
        map_query2sbjct, genomic_fragment = Genomics.Alignment2CDNA( p.mMapPeptide2Genome,
                                                                     query_from = p.mQueryFrom - 1,
                                                                     sbjct_from = 0,
                                                                     genome = genomic_fragment )
        ## check for errors:
        if map_query2sbjct.getRowTo() != p.mQueryTo * 3:
            print str(p)
            raise "# ERROR: boundary shift in query: %i %i" %( map_query2sbjct.getRowTo(), p.mQueryTo * 3 )
        if map_query2sbjct.getColTo() > len(genomic_fragment):
            print "# ERROR: length mismatch: genomic fragment (%i) shorter than last aligned residue (%i)" %\
                  (len(genomic_fragment), map_query2sbjct.getColTo())
            print "#", line
            print "# cds"
            print "#", cds_fragment
            print "# genomic"
            print "#",genomic_fragment
            continue
        if map_query2sbjct.getRowTo() > len(cds_fragment):
            print "# ERROR: length mismatch: cds fragment (%i) shorter than last aligned residue (%i)" %\
                  (len(cds_fragment), map_query2sbjct.getRowTo())
            print "#", line
            print "# cds"
            print "#", cds_fragment
            print "# genomic"
            print "#",genomic_fragment
            continue
        cds_seq = alignlib.makeSequence( cds_fragment )
        genomic_seq = alignlib.makeSequence( genomic_fragment )
        # Render the pairwise alignment, then split it into per-row fields.
        data = map( lambda x: string.split(x, "\t"),
                    string.split( alignlib.writePairAlignment( cds_seq,
                                                               genomic_seq,
                                                               map_query2sbjct ), "\n" ))
        row_ali, col_ali = Genomics.RemoveFrameShiftsFromAlignment(data[0][1], data[1][1])
        row_ali = Genomics.MaskStopCodons( row_ali )
        col_ali = Genomics.MaskStopCodons( col_ali )
        if len(row_ali) != len(col_ali):
            print "# ERROR: wrong alignment lengths."
            sys.exit(1)
        if len(row_ali) % 3 or len(col_ali) % 3:
            print line
            print row_ali
            print col_ali
            print len(row_ali), len(col_ali)
            print " ERROR: non-codons in alignment."
            sys.exit(1)
        # Emit the aligned pair as two FASTA records.
        print ">%i\n%s" % (p.mPredictionId, row_ali)
        print ">%s_vs_%s_%s_%i_%i\n%s" % \
              (p.mQueryToken, p.mSbjctToken, p.mSbjctStrand, p.mSbjctGenomeFrom, p.mSbjctGenomeTo, col_ali)
    print E.GetFooter()
| [
"andreas.heger@gmail.com"
] | andreas.heger@gmail.com |
bb1f91a8e88acabf8432b01b997fbc03d8e0aa9a | 7b065a6b01905a2da6ad2d00b6398aad150dc6c3 | /基础知识/4.文件操作/4.write()方法.py | 03c9d3080ae416f6c9431364f8d419f445021f9e | [] | no_license | ylwctyt/python3-1 | f4b0d8d6d0a7947170186b27bf51bc2f6e291ac7 | ca92e2dc9abc61265e48b7809cb12c3e572b5b6f | refs/heads/master | 2021-04-18T18:56:46.047193 | 2018-03-25T04:35:11 | 2018-03-25T04:35:11 | 126,699,773 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py | import json
# f = open("test_write.txt","w",encoding="utf-8")
# f.write("HelloWorld\n")
# f.write("HelloWorld\n")
# f.close()
# writelines() writes each string as-is: no separators or newlines added.
f = open("test_write.txt", "w", encoding="utf-8")
lines = ["1", "2", "3", "4", "5"]
# lines = [line+'\n' for line in lines]
f.writelines(lines)  # write a whole list in one call (写入一个列表)
f.close()
# Append JSON lines to a file (json 写入到文件).
# NOTE(review): the name `dict` shadows the built-in type.
dict = {"key1": "val2", "key2": "val2", "key3": "val3"}
with open("test.txt", mode="a", encoding="utf8") as f:
    for i in range(10):
        f.write(json.dumps(dict, ensure_ascii=False))
        f.write("\n")
| [
"359405466@qq.com"
] | 359405466@qq.com |
fc541b4fe07329e28fb81c87bf310cfde8ff531f | 8988a329c571cb04a5d97c691d0cd8bc4caf81d4 | /benchmarks/variables.py | 414b9a4cb3dafad00e29172ae8b4aee904f332d4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dwavesystems/dimod | 85329cbee86bdf5a73de05fa25884c877ea53002 | 8433f221a1e79101e1db0d80968ab5a2f59b865d | refs/heads/main | 2023-08-29T08:37:24.565927 | 2023-08-17T17:14:58 | 2023-08-17T17:14:58 | 100,658,303 | 118 | 93 | Apache-2.0 | 2023-09-13T18:15:37 | 2017-08-18T01:02:17 | Python | UTF-8 | Python | false | false | 1,568 | py | # Copyright 2022 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dimod.variables import Variables
class TimeConstuction:
    # ASV benchmark: time Variables(...) construction from assorted iterables.
    # NOTE(review): class name is misspelled ("Constuction"); renaming would
    # change the published benchmark id, so it is left as-is.
    num_variables = 1000
    # Each entry is one parametrised input for the benchmark.
    iterables = dict(range=range(num_variables),
                     strings=list(map(str, range(num_variables))),
                     integers=list(range(1000)),
                     empty=[],
                     none=None,
                     variables=Variables(range(1000)),
                     )
    params = iterables.keys()
    param_names = ['iterable']
    def time_construction(self, key):
        # Timed body: construct Variables from the selected iterable.
        Variables(self.iterables[key])
class TimeIteration:
    # ASV benchmark: time iterating Variables built from different label kinds.
    num_variables = 1000
    variables = dict(string=Variables(map(str, range(num_variables))),
                     index=Variables(range(num_variables)),
                     integer=Variables(range(num_variables, 0, -1))
                     )
    params = variables.keys()
    param_names = ['labels']
    def time_iteration(self, key):
        # Timed body: a full pass over the selected Variables object.
        for v in self.variables[key]:
            pass
| [
"arcondello@gmail.com"
] | arcondello@gmail.com |
778faa010de5bb8612053f55b33329ac19019012 | cce8469586694aeea759a577c77bbac0652bec6f | /detectron2/config/config.py | c8270fd7ee447cd10497dc6253627ab0660c67c5 | [
"Apache-2.0"
] | permissive | veraposeidon/detectron2 | 9a4553289111bf6a83ecd3361eab836fb5ea076b | df2f2ab213e5c089ebc65b84786f766ba2b2b5d5 | refs/heads/master | 2020-09-26T09:05:06.044612 | 2019-12-30T09:38:29 | 2019-12-30T09:38:29 | 226,223,447 | 3 | 0 | Apache-2.0 | 2019-12-22T13:16:50 | 2019-12-06T01:47:30 | Jupyter Notebook | UTF-8 | Python | false | false | 3,560 | py | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from fvcore.common.config import CfgNode as _CfgNode
class CfgNode(_CfgNode):
    """
    The same as `fvcore.common.config.CfgNode`, but different in:
    1. Use unsafe yaml loading by default.
       Note that this may lead to arbitrary code execution: you must not
       load a config file from untrusted sources before manually inspecting
       the content of the file.
    2. Support config versioning.
       When attempting to merge an old config, it will convert the old config automatically.
    """
    # Note that the default value of allow_unsafe is changed to True
    def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
        # Load the YAML (resolving any _BASE_ inheritance) as this node type.
        loaded_cfg = _CfgNode.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
        loaded_cfg = type(self)(loaded_cfg)
        # defaults.py needs to import CfgNode
        from .defaults import _C
        latest_ver = _C.VERSION
        assert (
            latest_ver == self.VERSION
        ), "CfgNode.merge_from_file is only allowed on a config of latest version!"
        logger = logging.getLogger(__name__)
        loaded_ver = loaded_cfg.get("VERSION", None)
        if loaded_ver is None:
            # The file declares no VERSION: infer one from its keys.
            from .compat import guess_version
            loaded_ver = guess_version(loaded_cfg, cfg_filename)
        assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
            loaded_ver, self.VERSION
        )
        if loaded_ver == self.VERSION:
            self.merge_from_other_cfg(loaded_cfg)
        else:
            # compat.py needs to import CfgNode
            from .compat import upgrade_config, downgrade_config
            logger.warning(
                "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
                "See docs/CHANGELOG.md for instructions to update your files.".format(
                    loaded_ver, cfg_filename, self.VERSION
                )
            )
            # To convert, first obtain a full config at an old version
            # (downgrade current defaults), merge the old file into it, then
            # upgrade the merged result back to the latest version.
            old_self = downgrade_config(self, to_version=loaded_ver)
            old_self.merge_from_other_cfg(loaded_cfg)
            new_config = upgrade_config(old_self)
            self.clear()
            self.update(new_config)
    def dump(self, *args, **kwargs):
        """
        Returns:
            str: a yaml string representation of the config
        """
        # to make it show up in docs
        return super().dump(*args, **kwargs)
global_cfg = CfgNode()
def get_cfg() -> CfgNode:
    """
    Get a copy of the default config.
    Returns:
        a detectron2 CfgNode instance.
    """
    # Imported here (not at module top) because defaults.py itself imports
    # CfgNode from this module.
    from .defaults import _C
    return _C.clone()
def set_global_cfg(cfg: CfgNode) -> None:
    """
    Let the global config point to the given cfg.
    Assume that the given "cfg" has the key "KEY", after calling
    `set_global_cfg(cfg)`, the key can be accessed by:
    .. code-block:: python
        from detectron2.config import global_cfg
        print(global_cfg.KEY)
    By using a hacky global config, you can access these configs anywhere,
    without having to pass the config object or the values deep into the code.
    This is a hacky feature introduced for quick prototyping / research exploration.
    """
    global global_cfg
    # Mutate in place so modules that already imported global_cfg see the
    # new values (rebinding the name would not update their reference).
    global_cfg.clear()
    global_cfg.update(cfg)
| [
"veraposeidon@gmail.com"
] | veraposeidon@gmail.com |
7da94cb8e28b840f977542ad552b721f062f4c0f | e5ea9950d5f64b1e5ab7dad5ef63f5b443ca52ed | /API_Engine/models/fields.py | a904ded47eb68374eb66d8535c6abad620feb90d | [] | no_license | MediKnot/MediKnot-AI | f2e5c55d09a036580706470e6c1d8f6d7dc9635b | 97528506c25d3b31d404f2e181a39a887dbe2bb4 | refs/heads/main | 2023-06-03T13:33:46.483665 | 2021-06-24T14:15:21 | 2021-06-24T14:15:21 | 376,215,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | """MODELS - FIELDS
Definition of Fields used on model classes attributes.
We define them separately because the PersonUpdate and PersonCreate models need to re-define their attributes,
as they change from Optional to required.
Address could define its fields on the model itself, but we define them here for convenience
"""
# # Installed # #
from pydantic import Field
# # Package # #
from ..utils import get_time, get_uuid
__all__ = ("PersonFields", "AddressFields")
# Keyword-argument bundles reused (via **) by the Field definitions below.
_string = dict(min_length=1)
"""Common attributes for all String fields"""
_unix_ts = dict(example=get_time())
"""Common attributes for all Unix timestamp fields"""
class PersonFields:
name = Field(
description="Full name of this person",
example="John Smith",
**_string
)
address = Field(
description="Address object where this person live"
)
address_update = Field(
description=f"{address.description}. When updating, the whole Address object is required, as it gets replaced"
)
birth = Field(
description="Date of birth, in format YYYY-MM-DD, or Unix timestamp",
example="1999-12-31"
)
age = Field(
description="Age of this person, if date of birth is specified",
example=20
)
person_id = Field(
description="Unique identifier of this person in the database",
example=get_uuid(),
min_length=36,
max_length=36
)
"""The person_id is the _id field of Mongo documents, and is set on PeopleRepository.create"""
created = Field(
alias="created",
description="When the person was registered (Unix timestamp)",
**_unix_ts
)
"""Created is set on PeopleRepository.create"""
updated = Field(
alias="updated",
description="When the person was updated for the last time (Unix timestamp)",
**_unix_ts
)
"""Created is set on PeopleRepository.update (and initially on create)"""
class AddressFields:
    """Pydantic ``Field`` definitions for the Address model."""
    street = Field(
        description="Main address line",
        example="22nd Bunker Hill Avenue",
        **_string
    )
    city = Field(
        description="City",
        example="Hamburg",
        **_string
    )
    state = Field(
        description="State, province and/or region",
        example="Mordor",
        **_string
    )
    zip_code = Field(
        description="Postal/ZIP code",
        example="19823",
        **_string
    )
| [
"ajinkyataranekar@gmail.com"
] | ajinkyataranekar@gmail.com |
a706d366c2b89cc2de1fa44bab761e41a23254c9 | 5f4f3ab6ece4eda1066bda2f80b9cf89a898f409 | /0x0B-python-input_output/10-class_to_json.py | 7e9784dda2a105257bb1f5945467692817f849a3 | [] | no_license | s0m35h1t/holbertonschool-higher_level_programming | 8af7f3bc54159efa05859f81ca3b9fb1739190e8 | f3b7ddeabf41b5cbc8460841c429b4b3bf254fea | refs/heads/master | 2020-09-28T20:32:23.955579 | 2020-05-14T20:22:50 | 2020-05-14T20:22:50 | 226,859,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | #!/usr/bin/python3
"""
get __dict__ class description
"""
def class_to_json(obj):
    """Return a JSON-serializable dictionary description of *obj*.

    The description is the object's attribute dictionary, which holds only
    simple data structures (list, dictionary, string, integer and boolean)
    suitable for JSON serialization.

    Args:
        obj (Object): the instance to describe.
    Returns:
        (Object): dict of the object's attributes.
    """
    return vars(obj)
| [
"adib.grouz@gmail.com"
] | adib.grouz@gmail.com |
b669fc22e51ea7d2d9e31e0ef07ecec2abbde3ba | 931c17844683a4fbbefcf2bb2d5468d08cce7dbd | /03.Data_Science/Test.py | 2ff749cabbb0693acb9c49bb983bc82a8e8e96eb | [] | no_license | sungwooman91/python_code | d7c8046089bf492b70d21f4ee5a8676377e70592 | fdf20690d761b533efef2f247719f598c14f50c8 | refs/heads/master | 2020-03-26T05:14:27.104725 | 2018-08-13T07:46:11 | 2018-08-13T07:46:13 | 144,545,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import csv # 외부모듈 사용!!!
with open("Demographic_Statistics_By_Zip_Code.csv", newline="") as infile:
data = list(csv.reader(infile)) # 파일을 'data'라는 리스트로 출력
#
# ## get_csv_rowInstance(row_index)
# ## COUNT FEMALE
# def get_csv_rowInstance(row_name): # data를
# find_row = data[0].index(row_name)
# row_instance = []
# for row in data[1:]:
# row_instance.append(int(row[find_row]))
#
# return row_instance
#
# print(get_csv_rowInstance("COUNT MALE"))
def get_csv_colInstance(primary_key) :
for col_instance in data[1:]:
if col_instance[0] == primary_key : return col_instance
else : continue
print(get_csv_colInstance(10002))ddsd | [
"tjddn636@naver.com"
] | tjddn636@naver.com |
576c3dddbc0a2fb79bb514302124bcd8b6350115 | 7624e7fca387651e278e1e9911b37c675e3a599c | /面试题&剑指 Offer/面试题 08.08. 有重复字符串的排列组合.py | 0ec11ab8a9ef9a2062647d0c9544c4c35ca5027c | [] | no_license | homezzm/leetcode | 53269f1c94c040a41b03e4342d4c241e3f1102b5 | 63ac5a0921835b1e9d65f71e1346bbb7d66dad9b | refs/heads/master | 2023-03-03T09:28:16.974397 | 2021-02-15T03:21:17 | 2021-02-15T03:21:17 | 330,537,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | class Solution(object):
def permutation(self, S):
"""
https://leetcode-cn.com/problems/permutation-ii-lcci/
:type S: str
:rtype: List[str]
有重复字符串的排列组合。编写一种方法,计算某字符串的所有排列组合。
示例1:输入:S = "qqe" 输出:["eqq","qeq","qqe"]
"""
if not S or len(S) <= 1: return []
res, S, length = [], sorted(S), len(S) # 排下序,使重复的都在一起
def backtrack(used, paths):
if length == len(paths):
res.append(''.join(paths))
return
for i in range(length):
if used[i]:
continue # 已经选择过的不需要再放进去了
if i > 0 and S[i] == S[i - 1] and not used[i - 1]:
continue # 如果当前节点与他的前一个节点一样,并其他的前一个节点已经被遍历过了,那我们也就不需要了。
used[i] = True
paths.append(S[i])
backtrack(used, paths)
used[i] = False
paths.pop()
backtrack([False] * length, [])
return res
if __name__ == '__main__':
    # Quick manual check; expected output: ['eqq', 'qeq', 'qqe']
    solution = Solution()
    print(solution.permutation('qqe'))
| [
"homezzm@126.com"
] | homezzm@126.com |
5b9c41ad7bd7f537b5ec26e1e565ce2aa685a1a2 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1/manjeetacrocs2/a.py | 430d5f4ef1493a5eda7c5d37ab4445304677ec9b | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 352 | py | t=int(input()); j=1;
for _ in range(t):
str1=input()
if int(str1)==0:
print("Case #{}: INSOMNIA".format(j))
j+=1
continue
s=set(); s|=set(list(str1))
sum1=int(str1);count=0
while len(s)!=10:
sum1+=int(str1); l3=list(str(sum1)); s|=set(l3)
print("Case #{}: {}".format(j,sum1))
j+=1 | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
7a0f902f4fd2b98ce87a8b8ed1548b76623a4291 | 23514a0e2baf6da053690dd511f1eef75a573e6b | /log-mining/com/haodou/log-mining/CollectionUtil.py | 4ebb82ebf1ce316a6ec6163875d3288a73b45c03 | [] | no_license | rainly/scripts-1 | b5f31880a1a917df23e4c110bb7661685851eff5 | 3ef01a58162b94fb36cdd38581c899d8a118eda0 | refs/heads/master | 2020-07-12T01:41:31.491344 | 2019-08-27T08:50:10 | 2019-08-27T08:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py |
def cut(rs, N):
    """Return the top-N (key, value) pairs of *rs*, ordered by value descending."""
    ranked = sorted(rs.items(), key=lambda item: item[1], reverse=True)
    return ranked[:N]
import random
def test():
    # Smoke test (Python 2 syntax): build 2N random small counts and show
    # that cut() does not mutate its input dict.
    N=5
    rs={}
    for i in range(N*2):
        rs[i]=int(3*random.random())
    print rs
    cut(rs,N)
    print rs
# Run the smoke test when executed as a script.
if __name__=="__main__":
    test()
def halfCut(ts, min=0, max=100):
    """Return the top half of *ts* via cut(), with the size clamped to [min, max].

    Args:
        ts: dict mapping item -> count.
        min: lower bound for the number of returned entries.
        max: upper bound for the number of returned entries.
    Returns:
        list of (key, value) pairs sorted by count descending, or {} when the
        clamped size is <= 0 (preserved from the original implementation).
    """
    # Fix: use floor division. Under Python 3 `len(ts)/2` is a float, which
    # crashes the slice inside cut(); `//` behaves identically under Python 2.
    n = len(ts) // 2
    if n < min:
        n = min
    if n > max:
        n = max
    if n <= 0:
        return {}
    return cut(ts, n)
| [
"zhaoweiguo@vxiaoke360.com"
] | zhaoweiguo@vxiaoke360.com |
ad582a3d28ae7f94a8654318676c5e54db2755de | 17dca703eed28a859bba4984eba5b039b900e3d7 | /operaciones/views.py | dc3dc8fde486273a340e37d206351d0718decdb2 | [] | no_license | alexogch1/SistemaOperaciones | 1a34872daf0e151672edd202a5089ee754805203 | ac72f6e3284061e240aebec6a3300ff463a3544c | refs/heads/master | 2021-01-03T15:32:45.470642 | 2020-03-03T07:47:27 | 2020-03-03T07:47:27 | 240,133,319 | 0 | 1 | null | 2020-02-28T05:21:57 | 2020-02-12T23:02:36 | Python | UTF-8 | Python | false | false | 6,699 | py | from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views .generic.base import TemplateView
from django.http.response import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Alignment, Border,Font,PatternFill,Side
from django.views import generic
from django.urls import reverse_lazy
from dateutil.parser import parse
from .models import TipoCambio
from .form import TipoCambioForm
class TipoCambioView(LoginRequiredMixin, generic.ListView):
    """List view of TipoCambio (exchange-rate) records; login required."""
    model = TipoCambio
    template_name = "operaciones/tc_list.html"
    context_object_name = "obj"
    login_url = "base:login"
class TipoCambioNew(LoginRequiredMixin, generic.CreateView):
    """Create view for TipoCambio records; stamps the creating user."""
    model = TipoCambio
    template_name = 'operaciones/tc_form.html'
    context_object_name = 'obj'
    form_class = TipoCambioForm
    success_url = reverse_lazy('operaciones:tc_list')
    # Fix: LoginRequiredMixin reads ``login_url``; the original assigned an
    # unused ``login_required`` attribute, so unauthenticated users were sent
    # to the default LOGIN_URL instead of "base:login" (compare TipoCambioView).
    login_url = "base:login"

    def form_valid(self, form):
        # Record the creating user (uc) before the form is saved.
        form.instance.uc = self.request.user
        return super().form_valid(form)
class TipoCambioEdit(LoginRequiredMixin, generic.UpdateView):
    """Update view for TipoCambio records; stamps the modifying user id."""
    model = TipoCambio
    template_name = 'operaciones/tc_form.html'
    context_object_name = 'obj'
    form_class = TipoCambioForm
    success_url = reverse_lazy('operaciones:tc_list')
    # Fix: LoginRequiredMixin reads ``login_url``; the original assigned an
    # unused ``login_required`` attribute, so unauthenticated users were sent
    # to the default LOGIN_URL instead of "base:login" (compare TipoCambioView).
    login_url = "base:login"

    def form_valid(self, form):
        # Record the modifying user id (um) before the form is saved.
        form.instance.um = self.request.user.id
        return super().form_valid(form)
class ReporteTC(TemplateView):
    """Excel (.xlsx) download of the TipoCambio records for a given date.

    The date is read from the ``campo`` GET parameter and the matching rows
    are rendered into a styled worksheet served as an attachment.
    """

    @staticmethod
    def _celda(celda, valor, color, horizontal='center', tamano=12):
        # Shared cell formatting used throughout the report: thin border,
        # solid fill, bold Calibri font; finally writes the cell value.
        celda.alignment = Alignment(horizontal=horizontal, vertical='center')
        celda.border = Border(left=Side(border_style='thin'), right=Side(border_style='thin'),
                              top=Side(border_style='thin'), bottom=Side(border_style='thin'))
        celda.fill = PatternFill(start_color=color, end_color=color, fill_type='solid')
        celda.font = Font(name='calibri', size=tamano, bold=True)
        celda.value = valor

    def get(self, request, *args, **kwargs):
        """Build the workbook and return it as an ``attachment`` response."""
        print(request.GET.get('campo'))
        fecha_sel = request.GET.get('campo')
        fecha_sel_parse = parse(fecha_sel)
        print('fecha ', fecha_sel_parse.date())
        query = TipoCambio.objects.filter(fecha=fecha_sel)
        wb = Workbook()
        ws = wb.active
        # Fix: the original assigned ``ws.tittle`` (typo), which silently set
        # an unused attribute and left the sheet named "Sheet".
        ws.title = 'TipoCambio'
        # Establecer el nombre del archivo
        nombre_archivo = "ReporteTC.xlsx"
        # --- Report header (rows 1-3, merged B..F, light-green fill) ---
        encabezados = ('Mar Bran S.A. de C.V.',
                       'Innovación, Mejora Continua y Six Sigma',
                       'Tipo de Cambio')
        for fila, texto in enumerate(encabezados, start=1):
            self._celda(ws.cell(row=fila, column=2), texto, '66FFCC',
                        horizontal='left', tamano=12)
            ws.merge_cells('B{0}:F{0}'.format(fila))
            ws.row_dimensions[fila].height = 20
        for col in ('B', 'C', 'D', 'E'):
            ws.column_dimensions[col].width = 20
        # --- Column titles (row 6) ---
        self._celda(ws['B6'], 'Fecha', '66CFCC', tamano=11)
        self._celda(ws['C6'], 'Tipo de Cambio', '66CFCC', tamano=11)
        # --- Data rows, starting at row 7 ---
        controlador = 7
        for q in query:
            self._celda(ws.cell(row=controlador, column=2), q.fecha, '66CFCC', tamano=11)
            self._celda(ws.cell(row=controlador, column=3), q.tipo_cambio, '66CFCC', tamano=11)
            controlador += 1
        # Definir el tipo de respuesta a dar (Excel attachment)
        response = HttpResponse(content_type='application/ms-excel')
        contenido = "attachment; filename = {0}".format(nombre_archivo)
        response["Content-Disposition"] = contenido
        wb.save(response)
        return response
#@login_required(login_url="/login/")
#@permission_required("catalogos.change_ingred",login_url="/login/")
def tc_inactivar(request, id):
    """Toggle the ``estado`` flag of a TipoCambio record (POST only).

    Returns "OK" when the record was found and toggled, "FAIL" for a
    non-POST request or an unknown primary key.
    """
    registro = TipoCambio.objects.filter(pk=id).first()
    if request.method != "POST":
        return HttpResponse("FAIL")
    if not registro:
        return HttpResponse("FAIL")
    registro.estado = not registro.estado
    registro.save()
    return HttpResponse("OK")
"alexogch@hotmail.com"
] | alexogch@hotmail.com |
bbff0812d21cbd950f8dcd096fa53a300491a14b | 8a73f252c333d9be87ad3827f6880fb47b43625f | /tutorials/W1_ModelTypes/solutions/W1_Tutorial2_Solution_8a33b742.py | dc15afaa0b608af6694a1387b75654336eac098b | [] | no_license | tifainfaith/professional-workshop-3 | 640948d23c8207f891ff0257a38e4653af5b452b | 851077030cbb5a2f53520dbccb80e4459ae8bfc7 | refs/heads/master | 2023-08-20T00:50:11.303677 | 2021-10-27T00:46:34 | 2021-10-27T00:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | def lif_neuron(n_steps=1000, alpha=0.01, rate=10):
""" Simulate a linear integrate-and-fire neuron.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
rate (int): The mean rate of incoming spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = alpha * exc[i]
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
# Demo: run one simulation and visualize it (plot_neuron_stats is supplied
# by the surrounding tutorial, not defined in this fragment).
v, spike_times = lif_neuron()
plot_neuron_stats(v, spike_times)
"action@github.com"
] | action@github.com |
9b70171b34c9d395fbcb8b2bcd8eae663ee97237 | ca17bd80ac1d02c711423ac4093330172002a513 | /decorator/decorator.py | 786734854a22973c775aedfcfa7e0b0d85d65adb | [] | no_license | Omega094/lc_practice | 64046dea8bbdaee99d767b70002a2b5b56313112 | e61776bcfd5d93c663b247d71e00f1b298683714 | refs/heads/master | 2020-03-12T13:45:13.988645 | 2018-04-23T06:28:32 | 2018-04-23T06:28:32 | 130,649,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | def f1(arg):
print "f1"
rl = arg()
print rl, "This si rl"
return rl + "f1"
@f1
def f2(arg = ""):
print "f2"
return arg + "f2r"
print "start"
print f2
| [
"zhao_j1@denison.edu"
] | zhao_j1@denison.edu |
bb7f3095c4b812063e8c77e887d8afde9d682874 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02995/s824603391.py | 91c9d13ffd6406e8745ba0c78a8d2e5ef263052b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created: Jul, 13, 2020 04:11:55 by Nobody
# $Author$
# $Date$
# $URL$
__giturl__ = "$URL$"
import math
from sys import stdin
input = stdin.readline
def main():
    # Count integers in [A, B] divisible by neither C nor D using
    # inclusion-exclusion over multiples of C, D and lcm(C, D).
    A, B, C, D = list(map(int, input().split()))
    # divisible by C
    dc = B//C - (A-1)//C
    # divisible by D
    dd = B//D - (A-1)//D
    lcm = C * D // math.gcd(C, D)
    # divisible by (C and D)
    dcd = B//lcm - (A-1)//lcm
    # print(f'B-A+1: {B-A+1}')
    # print(f'dc : {dc}')
    # print(f'dd : {dd}')
    # print(f'lcm : {lcm}')
    # print(f'dcd : {dcd}')
    print((B-A+1) - dc - dd + dcd)
if(__name__ == '__main__'):
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4252361cdb8d77952bfe9c73b5a3ef21eb5f86fc | a1b1e14dcaecbeee7a9ef7d0f3199f72e436dec9 | /migrations/0046_auto_20200731_1926.py | 3c15798c43e52f3aed7985402fd8d5f3be047b52 | [] | no_license | erllan/my-first-blog | ee99c3faad2de4039340b683143ada4c29310b31 | 0a1f37d9c95d70daaef945fbd950412281eb2cc4 | refs/heads/master | 2022-12-20T02:32:33.683460 | 2020-10-21T14:55:23 | 2020-10-21T14:55:23 | 262,575,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | # Generated by Django 3.0.2 on 2020-07-31 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Allow Message.from_user and Message.message_to to be blank/null FKs."""

    dependencies = [
        ('my_vk', '0045_auto_20200731_1855'),
    ]

    operations = [
        migrations.AlterField(
            model_name='message',
            name='from_user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='my_vk.User'),
        ),
        migrations.AlterField(
            model_name='message',
            name='message_to',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='my_vk.User'),
        ),
    ]
| [
"erlan.kubanychbekov.000@gmail.com"
] | erlan.kubanychbekov.000@gmail.com |
fe6ca773d2c95c079f2abc0a0ea3f814374940ee | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2737/60734/246210.py | 03d3b05e18516224525a5ee93bd214fb817c7686 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | import re
lst = re.findall(r'\d+',input())
lst = list(map(int,lst))
#最多只能有两个众数
m,n = 0,0
cm,cn = 0,0
for a in lst:
if a == m:
cm+=1
elif a == n:
cn+=1
elif cm == 0:
m = a
cm = 1
elif cn == 0:
n = a
cn = 1
else:#两个计数器都要减一
cm-=1
cn-=1
#重新遍历
cm, cn = 0,0
for a in lst:
if a == m:
cm+=1
elif a == n:
cn+=1
res = []
if cm>len(lst)/3:
res.append(m)
if cn>len(lst)/3:
res.append(n)
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
359fb0d94df61ec37a617dd620a2c50191bc432f | f14bf0274762591940a9f0382b6c9b99a42aedca | /WINDOW_openMDAO/src/api.py | 0684a81dd22f447d1d87f7e0deb1c424556f962e | [] | no_license | chunyuli/WINDOW_openMDAO | 5b610bcdac42fe45a69e2afcae74b92b3f27f092 | c9e39da2079d1a0b604fa9f4b9952dc663466871 | refs/heads/master | 2020-03-20T08:52:19.910162 | 2018-05-29T11:51:48 | 2018-05-29T11:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from AbsWakeModel.wake_linear_solver import WakeModel
from AbsWakeModel.AbstractWakeModel import DetermineIfInWake, WakeDeficit
from AbsAEP.farmpower_workflow import AEPWorkflow
from AbsTurbulence.abstract_wake_TI import AbstractWakeAddedTurbulence, DeficitMatrix, CtMatrix
from AbsWakeModel.AbsWakeMerge.abstract_wake_merging import AbstractWakeMerge
from AbsTurbulence.TI_workflow import TIWorkflow
from SiteConditionsPrep.depth_process import AbstractWaterDepth
from AbsElectricalCollection.abstract_collection_design import AbstractElectricDesign
from AbsSupportStructure.abstract_support_design import AbstractSupportStructureDesign, MaxTI
from AbsOandM.abstract_operations_maintenance import AbstractOandM
from AbsAEP.aep import AEP
from AbsTurbine.AbsTurbine import AbsTurbine
from Utils.util_components import NumberLayout, create_random_layout
from Utils.constraints import MinDistance, WithinBoundaries
from Utils.regular_parameterised import RegularLayout
from Utils.transform_quadrilateral import AreaMapping
from Utils.read_files import read_layout, read_windrose
from Utils.workflow_options import WorkflowOptions
| [
"s.sanchezperezmoreno@tudelft.nl"
] | s.sanchezperezmoreno@tudelft.nl |
136cc322199fffd4ac050c4614a69dd646546c5a | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/test_uw_field_lock.prg.py | 36106b8820eb6a53475dbabb1e58b66c89dc35fc | [] | no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | prg_comment = ""
prg_version = "0.7"
def program(prg, cmd):
    # Experiment timeline: each prg.add(time_us, action, ...) schedules one
    # step; `functions=` entries recompute times/arguments from the runtime
    # variables held by `cmd` (evaporation ramp, quad ramps, time of flight).
    prg.add(10000, "Initialize 0 TTL and Synchronize.sub")
    prg.add(50000, "DarkSpotMOT_19.sub")
    prg.add(209000000, "Synchronize.sub", enable=False)
    prg.add(209943111, "MOT lights Off TTL.sub")
    prg.add(209947301, "Config Field OFF.sub")
    prg.add(209949001, "Gray Molasses 2017")
    prg.add(209949001, "Optical pumping", enable=False)
    prg.add(209949011, "Scope 2 Trigger ON")
    prg.add(209986000, "Scope 2 Trigger OFF")
    prg.add(210000000, "Load_Quad")
    prg.add(210010000, "Quad_RampUP")
    prg.add(210060000, "Mirrors Imaging")
    prg.add(215000000, "Ramp_bias_down.sub")
    prg.add(216000000, "All AOM On.sub", functions=dict(time=lambda x: 10015.6899+cmd.get_var('QuadRampTime'), funct_enable=False))
    prg.add(220000000, "Number_lock", enable=False)
    prg.add(230000000, "Evaporation amp", 1000)
    # Exponential RF evaporation ramp from evap1_fstart to evap1_fend (MHz -> Hz).
    prg.add(230010000, "Evaporation ramp", start_t=0.0000, func_args="a=40e6, b=4e6, duration=10, tau=1", n_points=330, func="(b - a * exp(-duration / tau) + (a - b) * exp(-t / tau)) / (1 - exp(-duration / tau))", stop_t=5000.0000, functions=dict(func_args=lambda x: 'a={}, b={}, duration={}, tau={}'.format(cmd.get_var('evap1_fstart')*1e6, cmd.get_var('evap1_fend')*1e6, cmd.get_var('evap1_time')*1e-3, cmd.get_var('evap1_tau')), stop_t=lambda x: cmd.get_var('evap1_time')))
    prg.add(230014000, "Quad_RampDOWN", functions=dict(time=lambda x: x + cmd.get_var('evap1_time')))
    prg.add(230014000, "Evaporation amp", 1, functions=dict(time=lambda x: x +cmd.get_var('evap1_time') + 0.98))
    # Imaging sequence: trigger, setup, optional uw pulse, then imaging and field off.
    prg.add(232014000, "Scope 1 Trigger Pulse", polarity=1, pulse_t=0.01000, functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')-0.034))
    prg.add(232014000, "Setup_imaging", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')-2.135))
    prg.add(232014000, "Pulse uw", polarity=1, pulse_t=0.00200, functions=dict(pulse_t=lambda x: 1e-3 * cmd.get_var('marconi1_pulsetime'), time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')), enable=False)
    prg.add(232014000, "BEC_imaging", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')), enable=False)
    prg.add(232014000, "BEC_imaging_field_lock", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')))
    prg.add(232114000, "Config Field OFF.sub", functions=dict(time=lambda x: x+cmd.get_var('evap1_time')+cmd.get_var('Quad_rampdown_time')+cmd.get_var('hold_time')+cmd.get_var('tof')))
    return prg
def commands(cmd):
    # Scan driver: sweeps Bx_bottom over a shuffled grid of values, running
    # the program once per value, then stops after the last run.
    import numpy as np
    iters = np.arange(-0.1001, -0.06, 0.002)
    np.random.shuffle(iters)  # randomize scan order to decorrelate drifts
    j = 0
    while(cmd.running):
        print('\n-------o-------')
        Bx_bottom = iters[j]
        cmd.set_var('Bx_bottom', Bx_bottom)
        print('\n')
        print('Run #%d/%d, with variables:\nBx_bottom = %g\n'%(j+1, len(iters), Bx_bottom))
        cmd._system.run_number = j
        # Blocking run of one sequence, with extra settle time (ms? -- TODO confirm units).
        cmd.run(wait_end=True, add_time=100)
        j += 1
        if j == len(iters):
            # Scan finished: reset the run counter and stop the loop.
            cmd._system.run_number = 0
            cmd.stop()
    return cmd
| [
"carmelo.mordini@unitn.it"
] | carmelo.mordini@unitn.it |
51904ecbfe9cffd3ebd5c76f9e8576698bc9b7cd | fcecddb522cd6b775074ecd950aaddda9010ae57 | /Serenity/core/view_generators.py | 83e32ebfe5661f107d29a155e62250e9dfc250ea | [] | no_license | Firefly-Automation/Serenity | fa7fbe7800feed60a96a454e6fb520afa4c2030c | e343b31238af27255468a0773598130fd65d9793 | refs/heads/master | 2021-06-12T03:30:47.139101 | 2017-01-06T04:26:33 | 2017-01-06T04:26:33 | 66,432,533 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
# @Author: zpriddy
# @Date: 2016-08-23 22:07:06
# @Last Modified by: zpriddy
# @Last Modified time: 2016-08-23 22:20:07
## build array of all templates minified
## devices can have 'custom_view:true' and then 'custom_html:' as part of the device view
## process the data that comes in and generate the device_view json object
## write the object to disk - Should only have to re-generate every few minutes | [
"me@zpriddy.com"
] | me@zpriddy.com |
4fd8f9a4b477f960834b1c2278051375e53aa7f4 | 23d8a521f1f2c15ec745d8a68f405be5c8ad58ba | /acme/tf/networks/legal_actions.py | 93b53ee3888e1df13c347028b542892cd543de54 | [
"Apache-2.0"
] | permissive | Idate96/acme | 108766d67d1c123f4f90045b3ad459e9f25a9cf1 | 722c33a3b8c779647314444531cb2282fab9246a | refs/heads/master | 2023-04-20T18:54:43.100914 | 2021-05-22T14:08:43 | 2021-05-22T14:08:43 | 368,240,977 | 0 | 0 | Apache-2.0 | 2021-05-17T15:48:11 | 2021-05-17T15:48:10 | null | UTF-8 | Python | false | false | 4,759 | py | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used for handling illegal actions."""
from typing import Any, Callable, Iterable, Union
# pytype: disable=import-error
from acme.wrappers import open_spiel_wrapper
# pytype: enable=import-error
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class MaskedSequential(snt.Module):
  """Applies a legal actions mask to a linear chain of modules / callables.
  It is assumed the trailing dimension of the final layer (representing
  action values) is the same as the trailing dimension of legal_actions.
  """

  def __init__(self,
               layers: Iterable[Callable[..., Any]] = None,
               name: str = 'MaskedSequential'):
    super().__init__(name=name)
    self._layers = list(layers) if layers is not None else []
    # Large finite penalty substituted for illegal-action values.
    self._illegal_action_penalty = -1e9
    # Note: illegal_action_penalty cannot be -np.inf because trfl's qlearning
    # ops utilize a batched_index function that returns NaN whenever -np.inf
    # is present among action values.

  def __call__(self, inputs: open_spiel_wrapper.OLT) -> tf.Tensor:
    # Extract observation, legal actions, and terminal
    outputs = inputs.observation
    legal_actions = inputs.legal_actions
    terminal = inputs.terminal
    # Run the observation through the chained layers.
    for mod in self._layers:
      outputs = mod(outputs)
    # Apply legal actions mask
    outputs = tf.where(tf.equal(legal_actions, 1), outputs,
                       tf.fill(tf.shape(outputs), self._illegal_action_penalty))
    # When computing the Q-learning target (r_t + d_t * max q_t) we need to
    # ensure max q_t = 0 in terminal states.
    outputs = tf.where(tf.equal(terminal, 1), tf.zeros_like(outputs), outputs)
    return outputs
# FIXME: Add functionality to support decaying epsilon parameter.
# FIXME: This is a modified version of trfl's epsilon_greedy() which
# incorporates code from the bug fix described here
# https://github.com/deepmind/trfl/pull/28
class EpsilonGreedy(snt.Module):
  """Computes an epsilon-greedy distribution over actions.
  This policy does the following:
  - With probability 1 - epsilon, take the action corresponding to the highest
  action value, breaking ties uniformly at random.
  - With probability epsilon, take an action uniformly at random.
  """

  def __init__(self,
               epsilon: Union[tf.Tensor, float],
               threshold: float,
               name: str = 'EpsilonGreedy'):
    """Initialize the policy.
    Args:
      epsilon: Exploratory param with value between 0 and 1.
      threshold: Action values must exceed this value to qualify as a legal
        action and possibly be selected by the policy.
      name: Name of the network.
    Returns:
      policy: tfp.distributions.Categorical distribution representing the
        policy.
    """
    super().__init__(name=name)
    # Stored as a tf.Variable so it can be updated in place without
    # rebuilding the module (see the decaying-epsilon FIXME above).
    self._epsilon = tf.Variable(epsilon, trainable=False)
    self._threshold = threshold

  def __call__(self, action_values: tf.Tensor) -> tfd.Categorical:
    # Actions whose value is <= threshold are treated as illegal (mask 0).
    legal_actions_mask = tf.where(
        tf.math.less_equal(action_values, self._threshold),
        tf.fill(tf.shape(action_values), 0.),
        tf.fill(tf.shape(action_values), 1.))
    # Dithering action distribution.
    dither_probs = 1 / tf.reduce_sum(legal_actions_mask, axis=-1,
                                     keepdims=True) * legal_actions_mask
    # Mask illegal actions with -inf so they never win the max below.
    masked_action_values = tf.where(tf.equal(legal_actions_mask, 1),
                                    action_values,
                                    tf.fill(tf.shape(action_values), -np.inf))
    # Greedy action distribution, breaking ties uniformly at random.
    max_value = tf.reduce_max(masked_action_values, axis=-1, keepdims=True)
    greedy_probs = tf.cast(
        tf.equal(action_values * legal_actions_mask, max_value),
        action_values.dtype)
    greedy_probs /= tf.reduce_sum(greedy_probs, axis=-1, keepdims=True)
    # Epsilon-greedy action distribution.
    probs = self._epsilon * dither_probs + (1 - self._epsilon) * greedy_probs
    # Make the policy object.
    policy = tfd.Categorical(probs=probs)
    return policy
return policy
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.