blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
773c3cc121a7313a3dce5a3dbaea4cc5f874ba89 | 29f6b4804f06b8aabccd56fd122b54e4d556c59a | /CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Demo/ceilometer/ceilometer/network/statistics/port.py | 157c6686776787237e2dcaa610abe36336383467 | [
"Apache-2.0"
] | permissive | obahy/Susereum | 6ef6ae331c7c8f91d64177db97e0c344f62783fa | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | refs/heads/master | 2020-03-27T11:52:28.424277 | 2018-12-12T02:53:47 | 2018-12-12T02:53:47 | 146,511,286 | 3 | 2 | Apache-2.0 | 2018-12-05T01:34:17 | 2018-08-28T21:57:59 | HTML | UTF-8 | Python | false | false | 2,937 | py | #
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.network import statistics
from ceilometer import sample
class PortPollster(statistics._Base):
    """Pollster counting switch ports (gauge, unit 'port')."""
    meter_name = 'switch.port'
    meter_type = sample.TYPE_GAUGE
    meter_unit = 'port'
class PortPollsterReceivePackets(statistics._Base):
    """Cumulative count of packets received on a switch port."""
    meter_name = 'switch.port.receive.packets'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterTransmitPackets(statistics._Base):
    """Cumulative count of packets transmitted on a switch port."""
    meter_name = 'switch.port.transmit.packets'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterReceiveBytes(statistics._Base):
    """Cumulative count of bytes received on a switch port."""
    meter_name = 'switch.port.receive.bytes'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'B'
class PortPollsterTransmitBytes(statistics._Base):
    """Cumulative count of bytes transmitted on a switch port."""
    meter_name = 'switch.port.transmit.bytes'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'B'
class PortPollsterReceiveDrops(statistics._Base):
    """Cumulative count of received packets dropped on a switch port."""
    meter_name = 'switch.port.receive.drops'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterTransmitDrops(statistics._Base):
    """Cumulative count of transmitted packets dropped on a switch port."""
    meter_name = 'switch.port.transmit.drops'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterReceiveErrors(statistics._Base):
    """Cumulative count of receive errors on a switch port."""
    meter_name = 'switch.port.receive.errors'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterTransmitErrors(statistics._Base):
    """Cumulative count of transmit errors on a switch port."""
    meter_name = 'switch.port.transmit.errors'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterReceiveFrameErrors(statistics._Base):
    """Cumulative count of receive frame-alignment errors on a switch port."""
    meter_name = 'switch.port.receive.frame_error'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterReceiveOverrunErrors(statistics._Base):
    """Cumulative count of receive overrun errors on a switch port."""
    meter_name = 'switch.port.receive.overrun_error'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterReceiveCRCErrors(statistics._Base):
    """Cumulative count of receive CRC errors on a switch port."""
    meter_name = 'switch.port.receive.crc_error'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
class PortPollsterCollisionCount(statistics._Base):
    """Cumulative count of collisions observed on a switch port."""
    meter_name = 'switch.port.collision.count'
    meter_type = sample.TYPE_CUMULATIVE
    meter_unit = 'packet'
| [
"abelgomezr45@gmail.com"
] | abelgomezr45@gmail.com |
0c8e9163f2888dad87748866da7f0d5bbc0f96f5 | 212d39dd0e12d42ce9b830de7e8738504dda2428 | /ipc/fork2.py | 121bd21a75495360d20399dd7b2d622483bf3c29 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | waveform80/presentations | a0c7869f5acd699922f84ed1b510519c00472887 | 9e8d9f63d4e841e573d5b9b01c234128d49c29c5 | refs/heads/master | 2023-05-12T21:29:29.083191 | 2023-05-04T07:29:59 | 2023-05-04T07:29:59 | 21,940,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import io
import os
from time import sleep
# Demo: parent and child share one file object across fork().  Both inherit
# the same file descriptor and offset, but each process keeps its own
# userspace I/O buffer after the fork.
f = io.open('my_data', 'w+')
pid = os.fork()  # returns the child's pid in the parent, 0 in the child
if pid:
    # Parent ("master") writes into the shared file.
    # NOTE(review): the write may still sit in the parent's buffer when the
    # child reads one second later — presumably this demo illustrates (or
    # depends on) that flush timing; confirm the intended behavior.
    print("I'm the master: sending data")
    f.write("hello")
else:
    # Child ("slave") waits, rewinds the shared offset, and reads whatever
    # the parent managed to flush to disk.
    print("I'm the slave: waiting for data")
    sleep(1)
    f.seek(0)
    print("Received", f.read())
| [
"dave@waveform.org.uk"
] | dave@waveform.org.uk |
a641b6ef567ff86587e0d3453127a1d2d44f66f2 | 1dd687bdb3bb964383c3f4dde7e9eae8a09be5f5 | /pyleecan/Methods/Machine/Machine/plot_anim_rotor.py | 4524207e96adf3c8153a8d9be9ce90a1feec4933 | [
"Apache-2.0"
] | permissive | Kelos-Zhu/pyleecan | 4daa2c8738cfe8a721ac2bdf883c59a1b52d8570 | 368f8379688e31a6c26d2c1cd426f21dfbceff2a | refs/heads/master | 2022-11-18T14:30:29.787005 | 2020-07-09T16:55:02 | 2020-07-09T16:55:02 | 278,112,321 | 0 | 0 | Apache-2.0 | 2020-07-08T14:31:39 | 2020-07-08T14:31:38 | null | UTF-8 | Python | false | false | 2,121 | py | from numpy import pi
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.pyplot import axis, subplots
from matplotlib.patches import Polygon, Circle
def plot_anim_rotor(self, Nframe, Tanim, Nrot=1, is_loop=True):
    """Plot the machine with an animation of the rotor
    (inner rotor for now ?)

    Parameters
    ----------
    self : Machine
        Machine object
    Nframe: int
        Number of frame for the animation
    Tanim : float
        Duration of the animation [ms]
    Nrot : float
        Number of rotation
    is_loop : bool
        True to activate the loop animation
    """
    # Display
    fig, axes = subplots()
    axes.set_xlabel("(m)")
    axes.set_ylabel("(m)")
    axes.set_title("Machine")
    # Axis Setup
    axis("equal")
    # The Lamination is centered in the figure
    Lim = (self.stator.Rext) * 1.5  # Axes limit for plot
    # Rotor surfaces that will be rotated in place between frames.
    Rsurf = self.rotor.build_geometry(sym=1, alpha=0, delta=0)
    # Rotation angle between each frame
    Dalpha = 2 * pi * Nrot / Nframe
    def init():
        """Create the patches for the first image (stator then rotor, so
        the rotor patches end up last in axes.patches)."""
        Spatches = self.stator.plot(is_display=False)
        Rpatches = self.rotor.plot(is_display=False)
        for patch in Spatches:
            axes.add_patch(patch)
        for patch in Rpatches:
            axes.add_patch(patch)
        return []
    def update_rotor(ii):
        """Rotate and update the rotor patches.

        NOTE(review): the frame argument ``ii`` is immediately shadowed by
        the loop variable below — the rotation is incremental per call, so
        the frame index is unused; rename one of them for clarity.
        """
        for ii in range(len(Rsurf)):
            Rsurf[ii].rotate(Dalpha)
            patches = Rsurf[ii].get_patches()
            for patch in patches:
                if type(patch) is Polygon:
                    # Rotor patches are the last len(Rsurf) entries added
                    # in init(), hence the negative indexing.
                    axes.patches[-len(Rsurf) + ii].xy = patch.xy
                # elif type(patch) is Circle:
                # pass
        axes.set_xlim(-Lim, Lim)
        axes.set_ylim(-Lim, Lim)
        return []
    # Animation definition.  Keeping a reference to `anim` is required so
    # the animation is not garbage-collected before plt.show() returns.
    anim = animation.FuncAnimation(
        fig,
        update_rotor,
        init_func=init,
        frames=Nframe,
        interval=Tanim / Nframe,
        blit=True,
        repeat=is_loop,
    )
    plt.show()
| [
"pierre.bonneel@gmail.com"
] | pierre.bonneel@gmail.com |
318bb13a3661a89b40f876c3b2d72dfee56d1365 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_176/ch27_2019_03_15_04_30_11_986374.py | c83630adaec1d3021c42b98cdc52c9fc5978453f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | f= int(input('quantos cigarros fuma por dia?'))
a= int(input('há quantos anos fuma?'))
# Fix: the cigarettes-per-day value read above is bound to `f`, not `n`;
# referencing `n` raised NameError at runtime.
# NOTE(review): `a*f` keeps the original formula shape; the classic version
# of this exercise converts to days via 10 minutes per cigarette
# (a*365*f*10/(60*24)) — confirm the intended formula.
print ('a quantidade de tempo perdido em dias é: {0:.2f}'.format(a*f))
| [
"you@example.com"
] | you@example.com |
0e2a2e57dcde388e4acef2b5ce123becfe52f7be | 08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc | /src/mnistk/networks/linearbias_11.py | 2dcc5515834c7e7014dcd4a2738d353cae809eab | [] | no_license | ahgamut/mnistk | 58dadffad204602d425b18549e9b3d245dbf5486 | 19a661185e6d82996624fc6fcc03de7ad9213eb0 | refs/heads/master | 2021-11-04T07:36:07.394100 | 2021-10-27T18:37:12 | 2021-10-27T18:37:12 | 227,103,881 | 2 | 1 | null | 2020-02-19T22:07:24 | 2019-12-10T11:33:09 | Python | UTF-8 | Python | false | false | 802 | py | # -*- coding: utf-8 -*-
"""
linearbias_11.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class LinearBias_11(nn.Module):
    """Fully-connected stack 784 -> 79 -> 79 -> 65 -> 10 with biases,
    finished by a log-softmax over the class dimension.

    Input: any tensor whose leading dimension is the batch and whose
    remaining elements flatten to 784 (e.g. 1x28x28 MNIST images).
    """

    def __init__(self):
        super(LinearBias_11, self).__init__()
        # Hidden sizes were chosen by the network generator; every linear
        # layer keeps its bias term (hence the class name).
        self.f0 = nn.Linear(in_features=784, out_features=79, bias=True)
        self.f1 = nn.Linear(in_features=79, out_features=79, bias=True)
        self.f2 = nn.Linear(in_features=79, out_features=65, bias=True)
        self.f3 = nn.Linear(in_features=65, out_features=10, bias=True)
        self.f4 = nn.LogSoftmax(dim=1)

    def forward(self, *inputs):
        """Flatten the first input to (batch, 784) and run the stack,
        returning per-class log-probabilities of shape (batch, 10)."""
        flat = inputs[0].view(inputs[0].shape[0], 784)
        hidden = self.f1(self.f0(flat))
        logits = self.f3(self.f2(hidden))
        return self.f4(logits)
| [
"41098605+ahgamut@users.noreply.github.com"
] | 41098605+ahgamut@users.noreply.github.com |
f29d716cc9eccd1a742ce10c9deb5c7d563cfdf5 | d210853ba6d1f3b5383a09e1b553c19083d78014 | /server/cart/utils.py | c5c47f24bfe9d396766e798b67c8a78dc7232248 | [] | no_license | Hagen013/presidentwatches | f252c7995e39f6cffb6608e43f555abc32f6a9fc | b9ca72aef1db01262675274c83a5c5dff4d6e2da | refs/heads/master | 2022-12-17T08:45:15.541869 | 2019-12-29T17:48:56 | 2019-12-29T17:48:56 | 162,160,435 | 0 | 0 | null | 2022-12-08T01:49:45 | 2018-12-17T16:36:05 | HTML | UTF-8 | Python | false | false | 680 | py | from .models import Promocode
from .constants import (PICKPOINT_TO_CDEK_STATUS_CODE_MAPPING,
RUPOST_TO_CDEK_STATUS_CODE_MAPPING,
RUPOST_TEXT_TO_STATUS_MAPPING)
def get_promocode_by_brand(brand):
    """Return the promocodes applicable to *brand*, ordered by sale amount.

    Fixes two NameErrors in the original: it referenced ``self.brand``
    inside a module-level function, and passed the bare name ``sale_amount``
    to ``order_by`` instead of the field-name string.
    """
    # NOTE(review): ``brands__in`` expects an iterable — confirm callers
    # pass a collection of brands rather than a single Brand instance.
    return Promocode.objects.filter(brands__in=brand).order_by('sale_amount')
def pickpoint_to_cdek_code(code):
    """Translate a PickPoint status code to its CDEK equivalent; codes
    without a mapping pass through unchanged."""
    try:
        return PICKPOINT_TO_CDEK_STATUS_CODE_MAPPING[code]
    except KeyError:
        return code
def rupost_to_cdek_code(code):
    """Translate a Russian Post status code (looked up by its string form)
    to a CDEK code, falling back to the original code when unmapped."""
    mapped = RUPOST_TO_CDEK_STATUS_CODE_MAPPING.get(str(code))
    return code if mapped is None else mapped
def rupost_msg_to_code(msg):
    """Map a Russian Post status message to its numeric status; unknown
    messages map to 0."""
    try:
        return RUPOST_TEXT_TO_STATUS_MAPPING[msg]
    except KeyError:
        return 0
| [
"="
] | = |
def alternatingSort(a):
    """Check whether interleaving *a*'s front with its reversed back
    (a[0], a[-1], a[1], a[-2], ...) yields a sorted sequence.

    Fixes the Python-3 incompatibility of the original: ``i/2`` produced
    float indices (TypeError); floor division keeps them integral with the
    same values the Python-2 code computed.

    Args:
        a: list of comparable, hashable values.
    Returns:
        bool: True if the interleaved sequence is sorted; duplicates are
        rejected up front (they were disallowed by the original as well).
    """
    if len(a) != len(set(a)):  # duplicates are never accepted
        return False
    interleaved = []
    for i in range(len(a)):
        # Even slots take from the front, odd slots from the back; for odd
        # i, (-i)//2 floors to -1, -2, ... exactly as Python 2's -i/2 did.
        interleaved.append(a[i // 2] if i % 2 == 0 else a[-i // 2])
    return interleaved == sorted(interleaved)
"noreply@github.com"
] | hemal507.noreply@github.com |
6642866cf5ff1d95a5d9e5c2d5f549797601a25f | e75cb799000d3120d4a93a2826fe101228d1e1a7 | /03-python practical examples/01a-beautiful soup - coreyms dot com - brief.py | ad181e5f815eca3b4abe9fc50e2cf30c6332ff6d | [] | no_license | atrox3d/python-corey-schafer-tutorials | 89fb410f1e13fd6910dc42401782b1406ffb87e8 | 9d991e3dd9647adc55ae1f343fedfc3faa202b01 | refs/heads/master | 2022-12-14T17:55:01.566427 | 2022-09-02T15:48:20 | 2022-09-02T15:48:20 | 221,426,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | from modules import utils
import os.path
from bs4 import BeautifulSoup
import requests
########################################################################################################################
utils.banner(
    'https://www.youtube.com/watch?v=ng2o98k983k',
    'web scraping with beautiful soup',
    'http://coreyms.com'
)
request = requests.get('http://coreyms.com')  # blocking HTTP GET
print(f'request to http://coreyms.com: {request}')  # repr shows the HTTP status code
source = request.text  # decoded HTML body
########################################################################################################################
utils.banner('parse source with BeautifulSoup (prettify)')
soup = BeautifulSoup(source, 'lxml')  # parse the HTML with the lxml backend
########################################################################################################################
# One <article> per blog post: print the title, the summary paragraph and,
# when the post embeds a video, reconstruct its YouTube watch URL.
for article in soup.find_all('article'):
    headline = article.h2.a.text  # NOTE(review): unused — headline2 below is printed instead
    headline2 = article.a.text  # text of the first link, i.e. the post title
    print(f'{headline2!r}')
    summary = article.find('div', class_='entry-content')
    print(f'{summary.p.text!r}')
    video = article.find('iframe')  # embedded player, if any
    if video is not None:
        video_src = video['src']
        video_url = video_src.split('?')[0]  # drop the query string
        videourl_parts = video_url.split('/')
        video_id = videourl_parts[-1]  # last path component is the video id
        yt_link = f'https://youtube.com/watch?v={video_id}'
        print(yt_link)
    print()
| [
"atrox3d@gmail.com"
] | atrox3d@gmail.com |
c3f55696585346c21eb70a605ee293b6d94b03f0 | 109a3ed4e5355e0ba5ef9454ff46ee2acc58e013 | /background/alert_handler.py | 71da5910b10fe5608f511b2407091804a0870aea | [] | no_license | zhaobin022/monitor | 9ff828b8316995a6cf930ae3643d25ff627d4bdb | fe5aacf49bf6961147030312a114986fda7f04ab | refs/heads/master | 2020-07-10T21:04:24.024708 | 2016-09-08T03:35:46 | 2016-09-08T03:35:46 | 66,442,685 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,896 | py | __author__ = 'zhaobin022'
from background import models
import json
import copy
import time
import operator
class AlertHandler(object):
    """Evaluate one host's alert triggers against recent metric samples held
    in Redis and publish notify/recover messages to RabbitMQ.

    NOTE(review): Python 2 only — uses print statements and dict.has_key().
    """
    def __init__(self,client_id,redis_obj,mq_conn):
        # client_id: primary key of the monitored Host row.
        self.client_id = client_id
        self.redis_obj = redis_obj
        self.mq_conn = mq_conn
        self.mq_channel = self.mq_conn.channel()
    def get_host_triggers(self):
        """Collect the triggers attached to the host's own templates and to
        the templates of every group the host belongs to (de-duplicated)."""
        triggers = []
        host_obj = models.Host.objects.get(id=self.client_id)
        for t in host_obj.templates.select_related():
            triggers.extend(t.triggers.select_related())
        for g in host_obj.host_groups.select_related():
            for t in g.templates.select_related():
                triggers.extend(t.triggers.select_related())
        return set(triggers)
    def load_data_from_redis(self,time_in_second,interval,redis_key):
        """Return the data points from the tail of the Redis list that are
        newer than now - time_in_second, most recent first.

        Each stored item is JSON of the form [value_payload, unix_timestamp].
        """
        # Fetch a few extra points as slack against irregular intervals.
        data_point_count = time_in_second/interval+5
        redis_slice = self.redis_obj.lrange(redis_key,-data_point_count,-1)
        ret = []
        redis_slice.reverse()
        for p in redis_slice:
            p = json.loads(p)
            update_time = p[1]
            if time.time() - update_time < time_in_second:
                ret.append(p)
        return ret
    def deal_expression(self,expression):
        """Evaluate one trigger expression: load the sample window for its
        service and dispatch to the configured aggregate (e.g. get_avg)."""
        # data_calc_args is comma-separated; the first field is the window
        # size in minutes.
        time_range = expression.data_calc_args.split(',')[0]
        time_in_second = int(time_range) * 60
        interval = expression.service.interval
        redis_key = "StatusData_%s_%s_latest" %(self.client_id,expression.service.name)
        data_set = self.load_data_from_redis(time_in_second,interval,redis_key)
        data_calc_func = getattr(self,'get_%s' % expression.data_calc_func)
        ret = data_calc_func(data_set,expression)
        return ret
    def get_avg(self,data_set,expression):
        """Average the monitored index over the window and compare it to the
        threshold with the configured operator (an `operator` module name).

        Returns [triggered_bool, rounded_avg, specified_key_or_None], or
        None when data_set is empty or no branch matches.
        """
        temp_dic = {}  # NOTE(review): unused — candidate for removal.
        if data_set:
            data_point = data_set[0]
            if 'data' not in data_point[0].keys():
                # Flat payload: the index key sits at the top level.
                ret_list = []
                for p in data_set:
                    val = p[0][expression.service_index.key]
                    ret_list.append(float(val))
                avg_num = sum(ret_list)/len(ret_list)
                f = getattr(operator,expression.operator_type)
                ret = [f(avg_num,expression.threshold),round(float(avg_num),2),None]
                return ret
            else:
                # Nested payload: values live under data[specified_index_key].
                # First pass: build the empty accumulator structure.
                ret_dic = {}
                for key,val in data_point[0]['data'].items():
                    if key == expression.specified_index_key:
                        for sub_key , sub_val in val.items():
                            if sub_key == expression.service_index.key:
                                if not ret_dic.has_key(key):
                                    ret_dic[key] = {}
                                if not ret_dic[key].has_key(sub_key):
                                    ret_dic[key][sub_key] = []
                # Second pass: accumulate every matching value in the window.
                for p in data_set:
                    data_point,time_stamp = p
                    for key , val in data_point['data'].items():
                        if key == expression.specified_index_key:
                            for sub_key , sub_val in val.items():
                                if sub_key == expression.service_index.key:
                                    ret_dic[key][sub_key].append(float(sub_val))
                avg_num = sum(ret_dic[expression.specified_index_key][expression.service_index.key])/len(ret_dic[expression.specified_index_key][expression.service_index.key])
                if hasattr(operator,expression.operator_type):
                    func = getattr(operator,expression.operator_type)
                    status = func(avg_num,expression.threshold)
                    return [status,round(avg_num,2),expression.specified_index_key]
    def process(self):
        """Evaluate every trigger for this host, publishing alert/recovery
        messages to the 'trigger_notify' queue, then emit a host-alive
        heartbeat on 'host_alive_notify'."""
        print 'in alert process '
        triggers = self.get_host_triggers()
        for t in triggers:
            positive_expressions = []
            expression_ret_str = ''
            redis_alert_key = 'host_%s_trigger_%s' %(self.client_id,t.id)
            alert_data_in_redis = self.redis_obj.get(redis_alert_key)
            redis_key_flag = False
            if alert_data_in_redis:
                redis_key_flag = True
            # Build a boolean expression string such as "True and False" out
            # of the per-expression results joined by their logic operators.
            for expression in t.triggerexpression_set.select_related().order_by('id'):
                expression_ret = self.deal_expression(expression)
                if expression_ret:
                    expression_ret_str += str(expression_ret[0])
                    if expression_ret[0]:
                        expression_ret.insert(1,expression.service_index.key)
                        expression_ret.insert(1,expression.data_calc_func)
                        expression_ret.insert(1,expression.service.name)
                        positive_expressions.append(expression_ret)
                if expression.logic_type:
                    expression_ret_str += " "+expression.logic_type+" "
            # NOTE(review): eval() of a constructed string — inputs come from
            # trigger config, but a safer boolean reducer would be preferable;
            # also, an empty string here (no expression results) would raise.
            notify_flag = eval(expression_ret_str)
            recover_data = ''  # NOTE(review): unused — candidate for removal.
            if notify_flag:
                # Alert fired: merge into any prior alert state and publish.
                if redis_key_flag:
                    notify_data = json.loads(alert_data_in_redis)
                else:
                    notify_data = {}
                notify_data['client_id'] = self.client_id
                notify_data['trigger_id'] = t.id
                notify_data['trigger_name'] = t.name
                notify_data['status'] = True
                notify_data['notify_detail'] = positive_expressions
                self.redis_obj.set(redis_alert_key,json.dumps(notify_data))
                print notify_data,'notify_data'
                self.mq_channel.queue_declare(queue='trigger_notify')
                self.mq_channel.basic_publish(exchange='', routing_key='trigger_notify', body=json.dumps(notify_data))
            else:
                # Trigger no longer firing: if a prior alert exists, mark it
                # recovered and publish the recovery.
                if redis_key_flag:
                    # alert_data_in_redis = self.redis_obj.get(redis_alert_key)
                    alert_data_in_redis = json.loads(alert_data_in_redis)
                    alert_data_in_redis['status'] = False
                    self.redis_obj.set(redis_alert_key,json.dumps(alert_data_in_redis))
                    self.mq_channel.queue_declare(queue='trigger_notify')
                    self.mq_channel.basic_publish(exchange='', routing_key='trigger_notify', body=json.dumps(alert_data_in_redis)) # self.redis_obj.delete(redis_alert_key)
        # Heartbeat: tell the consumer this host's alert pass completed.
        alert_dic = {}
        alert_dic['client_id'] = self.client_id
        print 'alert_dic for host alive ..........................'
        self.mq_channel.queue_declare(queue='host_alive_notify')
self.mq_channel.basic_publish(exchange='', routing_key='host_alive_notify', body=json.dumps(alert_dic)) | [
"zhaobin022@qq.com"
] | zhaobin022@qq.com |
00f11b662e6c46b1f0cc977d2fbb0f97ed91a934 | 45844683ca61f6f1a3c70d4a82d50ade067b9de7 | /posts/migrations/0005_auto_20171210_0017.py | 2de7bde5d668694a271554db585276150129e942 | [] | no_license | PHironaka/bi-fun | 5a8faf89d57ecb021eb4de9fcbb5f29bd7efd0fa | cb961678c938f0704c6c6127585c0a3c4044bbc8 | refs/heads/master | 2023-01-10T06:48:42.744321 | 2019-09-03T23:00:42 | 2019-09-03T23:00:42 | 112,222,207 | 0 | 0 | null | 2022-12-29T11:38:41 | 2017-11-27T16:45:57 | HTML | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-12-10 00:17
from __future__ import unicode_literals
from django.db import migrations
import markdownx.models
class Migration(migrations.Migration):
    """Auto-generated migration: alter Post.content to a plain
    MarkdownxField (dropping the field's previous options)."""

    dependencies = [
        ('posts', '0004_post_tags'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='content',
            field=markdownx.models.MarkdownxField(),
        ),
    ]
| [
"peterhironaka@mac.com"
] | peterhironaka@mac.com |
58c2012eb70833208bfab5762221cf5e8d53507f | cac9c211a4eeb55cfd61d8e5c54a1d4082c4de33 | /survol/sources_types/sqlserver/dsn/sqlserver_dsn_sessions.py | 3077d7ca241faa34737ee30d6296b68d5e35fe70 | [
"BSD-3-Clause"
] | permissive | vchateauneu/survol | 8c8b5db67f81c6400c3e2f4b84b57fb83d69fb1f | 2b5be9d28115f8f9b1dd91bf05449c92bf9a9926 | refs/heads/master | 2020-03-21T09:11:37.765314 | 2018-07-03T20:40:16 | 2018-07-03T20:40:16 | 138,387,051 | 1 | 0 | null | 2018-06-23T09:05:45 | 2018-06-23T09:05:45 | null | UTF-8 | Python | false | false | 3,054 | py | #!/usr/bin/python
"""
Sessions in SQL Server database (ODBC)
"""
import sys
import lib_common
from lib_properties import pc
from sources_types.odbc import dsn as survol_odbc_dsn
from sources_types.sqlserver import dsn as survol_sqlserver_dsn
from sources_types.sqlserver import session
try:
import pyodbc
except ImportError:
lib_common.ErrorMessageHtml("pyodbc Python library not installed")
def Main():
    """CGI entry point: graph the active sessions of a SQL Server DSN.

    Connects through ODBC, queries sys.dm_exec_sessions, and emits one RDF
    node per session linked to its host process and login metadata, then
    renders the graph with OutCgiRdf.
    """
    cgiEnv = lib_common.CgiEnv()
    grph = cgiEnv.GetGraph()

    dsnNam = survol_odbc_dsn.GetDsnNameFromCgi(cgiEnv)

    sys.stderr.write("dsn=(%s)\n" % dsnNam)

    nodeDsn = survol_sqlserver_dsn.MakeUri(dsnNam)

    ODBC_ConnectString = survol_odbc_dsn.MakeOdbcConnectionString(dsnNam)

    try:
        cnxn = pyodbc.connect(ODBC_ConnectString)
        sys.stderr.write("Connected: %s\n" % dsnNam)
        cursorSessions = cnxn.cursor()

        qrySessions = """
            SELECT host_name,host_process_id,session_id,program_name,client_interface_name,original_login_name,nt_domain,nt_user_name
            FROM sys.dm_exec_sessions
            """

        propSqlServerSession = lib_common.MakeProp("SqlServer session")
        propSqlServerHostProcess = lib_common.MakeProp("Host process")
        propSqlServerProgramName = lib_common.MakeProp("Program name")
        propSqlServerClientInterface = lib_common.MakeProp("Client Interface")
        propSqlServerOriginalLoginName = lib_common.MakeProp("original_login_name")
        propSqlServerNTDomain = lib_common.MakeProp("nt_domain")
        propSqlServerNTUserName = lib_common.MakeProp("nt_user_name")

        for rowSess in cursorSessions.execute(qrySessions):
            sys.stderr.write("rowSess.session_id=(%s)\n" % rowSess.session_id)
            nodeSession = session.MakeUri(dsnNam, rowSess.session_id)
            grph.add((nodeDsn, propSqlServerSession, nodeSession))

            # Link the session to the client process on its host machine.
            if rowSess.host_process_id:
                node_process = lib_common.RemoteBox(rowSess.host_name).PidUri(rowSess.host_process_id)
                grph.add((node_process, pc.property_pid, lib_common.NodeLiteral(rowSess.host_process_id)))
                grph.add((nodeSession, propSqlServerHostProcess, node_process))
            if rowSess.program_name:
                grph.add((nodeSession, propSqlServerProgramName, lib_common.NodeLiteral(rowSess.program_name)))
            if rowSess.client_interface_name:
                grph.add((nodeSession, propSqlServerClientInterface, lib_common.NodeLiteral(rowSess.client_interface_name)))

            # TODO: Make nodes with these:
            if rowSess.original_login_name:
                grph.add((nodeSession, propSqlServerOriginalLoginName, lib_common.NodeLiteral(rowSess.original_login_name)))
            if rowSess.nt_domain:
                grph.add((nodeSession, propSqlServerNTDomain, lib_common.NodeLiteral(rowSess.nt_domain)))
            if rowSess.nt_user_name:
                grph.add((nodeSession, propSqlServerNTUserName, lib_common.NodeLiteral(rowSess.nt_user_name)))

    except Exception:
        # Fix: removed the dead `exc = sys.exc_info()[0]` assignment — the
        # message already embeds sys.exc_info() directly.
        lib_common.ErrorMessageHtml(
            "nodeDsn=%s Unexpected error:%s" % (dsnNam, str(sys.exc_info())))

    cgiEnv.OutCgiRdf("LAYOUT_RECT",[propSqlServerSession,propSqlServerHostProcess])
if __name__ == '__main__':
Main()
# http://www.easysoft.com/developer/languages/python/pyodbc.html
| [
"remi.chateauneu@gmail.com"
] | remi.chateauneu@gmail.com |
bfc091a392d75815015f901cd1b8fe44eb78dd3a | c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce | /flask/flaskenv/Lib/site-packages/tensorflow/contrib/distributions/python/ops/bijectors/ordered.py | acb356d50e500ad649f626bb57b53dd2254739bd | [] | no_license | AhsonAslam/webapi | 54cf7466aac4685da1105f9fb84c686e38f92121 | 1b2bfa4614e7afdc57c9210b0674506ea70b20b5 | refs/heads/master | 2020-07-27T06:05:36.057953 | 2019-09-17T06:35:33 | 2019-09-17T06:35:33 | 208,895,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6bf5b33839d1904f951bf6c90d6121fb8d3fc6788c52baa16e30a171aca54574
size 4965
| [
"github@cuba12345"
] | github@cuba12345 |
3a9ec80a07429d3fb5044b350f7ded7d8eb73c1c | 660b2e940ccee2b729aa7d00ef0453cdac9dbf6a | /student_and_information/student_and_information/settings.py | e11cea4a0923eda94711f365b8cd761791263ded | [] | no_license | iversongit/20180427 | 68c251b6acfef91252d7e622cffae8450dbcdb3f | 9a9f1f1abaa3b620a000b2c8f5f91d8acfd77d8a | refs/heads/master | 2020-03-13T17:51:55.044089 | 2018-04-27T13:30:31 | 2018-04-27T13:30:31 | 131,225,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,391 | py | """
Django settings for student_and_information project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hard-coded secret key committed to source control — load it
# from an environment variable before any real deployment.
SECRET_KEY = '(i)-_gb_!rp+*ul5&xjauxgs*)q#a142neptc4$$w()vnle3mn'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []  # must list the served hostnames once DEBUG is False
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student',
'information',
'uauth'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'student_and_information.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'student_and_information.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# MySQL connection for the project database.
# NOTE(review): credentials are hard-coded; prefer environment variables or
# a local settings override kept out of version control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'HOST': 'localhost',
        'NAME': 'student_and_information',
        'USER': 'root',
        'PASSWORD': '5201314',
        'PORT': '3306'
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
] | [
"1564329410@qq.com"
] | 1564329410@qq.com |
d7537dfc6e026d7cec6f128f87ae1bae37731b7e | 779b0dde59be392abf91a38d250ffd04c65cb5cc | /reinforcement/tensorflow/minigo/selfplay_worker.py | 011137792a60c0d71eb53e25406150ce8462a663 | [
"Apache-2.0"
] | permissive | libbyandhelen/reference | 6bb028da97df81738b1e534790c714ae3ccd7688 | f11b8461e6ffa76606ce0bdd00cf5f571dcf0708 | refs/heads/master | 2022-12-11T00:36:16.902050 | 2018-06-12T21:02:56 | 2018-06-12T21:02:56 | 136,956,850 | 0 | 1 | null | 2022-12-08T02:11:29 | 2018-06-11T17:06:54 | Python | UTF-8 | Python | false | false | 9,258 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper scripts to ensure that main.py commands are called correctly."""
# import argh
# import argparse
# import cloud_logging
# import logging
import glob
import os
import main
import shipname
import sys
# import time
# import shutil
import dual_net
import preprocessing
import numpy
import random
# from utils import timer
# from tensorflow import gfile
# import tensorflow as tf
import logging
import goparams
# import qmeas
# import multiprocessing
# Pull in environment variables. Run `source ./cluster/common` to set these.
#BUCKET_NAME = os.environ['BUCKET_NAME']
#BASE_DIR = "gs://{}".format(BUCKET_NAME)
#BASE_DIR = goparams.BASE_DIR
BASE_DIR = sys.argv[1]
MODELS_DIR = os.path.join(BASE_DIR, 'models')
SELFPLAY_DIR = os.path.join(BASE_DIR, 'data/selfplay')
HOLDOUT_DIR = os.path.join(BASE_DIR, 'data/holdout')
SGF_DIR = os.path.join(BASE_DIR, 'sgf')
TRAINING_CHUNK_DIR = os.path.join(BASE_DIR, 'data', 'training_chunks')
ESTIMATOR_WORKING_DIR = os.path.join(BASE_DIR, 'estimator_working_dir')
# What percent of games to holdout from training per generation
HOLDOUT_PCT = goparams.HOLDOUT_PCT
def print_flags():
    """Print the derived directory-layout variables as --flag=value lines."""
    named_dirs = (
        ('BASE_DIR', BASE_DIR),
        ('MODELS_DIR', MODELS_DIR),
        ('SELFPLAY_DIR', SELFPLAY_DIR),
        ('HOLDOUT_DIR', HOLDOUT_DIR),
        ('SGF_DIR', SGF_DIR),
        ('TRAINING_CHUNK_DIR', TRAINING_CHUNK_DIR),
        ('ESTIMATOR_WORKING_DIR', ESTIMATOR_WORKING_DIR),
    )
    print("Computed variables are:")
    lines = ['--{}={}'.format(name, path) for name, path in named_dirs]
    print('\n'.join(lines))
def get_models():
    """Find all models under MODELS_DIR.

    Returns:
        List of (model_number, model_name) tuples sorted ascending,
        e.g. [(13, '000013-modelname'), (17, '000017-modelname'), ...].
    """
    paths = glob.glob(os.path.join(MODELS_DIR, '*'))
    pairs = []
    for path in paths:
        basename = os.path.basename(path)
        pairs.append((shipname.detect_model_num(basename),
                      shipname.detect_model_name(basename)))
    return sorted(pairs)
def get_latest_model():
    """Find the latest model, returning its model number and name.

    Returns:
        (model_number, model_name), e.g. (17, '000017-modelname').
        Falls back to (0, '000000-bootstrap') when no model exists yet.
    """
    models = get_models()
    if not models:  # idiomatic truthiness check instead of len() == 0
        return (0, '000000-bootstrap')
    return models[-1]
def get_model(model_num):
    """Return the model name registered under *model_num*.

    Raises:
        ValueError: if no model with that number exists.
    """
    models = dict(get_models())  # dict() directly from the (num, name) pairs
    if model_num not in models:  # idiomatic 'not in' instead of 'not x in'
        raise ValueError("Model {} not found!".format(model_num))
    return models[model_num]
def game_counts(n_back=20):
    """Print the number of recorded self-play games for each of the most
    recent ``n_back`` models."""
    meta_paths = glob.glob(os.path.join(MODELS_DIR, '*.meta'))
    names = [os.path.basename(p).split('.')[0] for p in meta_paths]
    names.sort(reverse=True)
    for name in names[:n_back]:
        game_files = glob.glob(os.path.join(SELFPLAY_DIR, name, '*.zz'))
        print(name, len(game_files))
def bootstrap():
    """Create the initial (generation-0) model and export it to MODELS_DIR."""
    bootstrap_name = shipname.generate(0)
    bootstrap_model_path = os.path.join(MODELS_DIR, bootstrap_name)
    print("Bootstrapping with working dir {}\n Model 0 exported to {}".format(
        ESTIMATOR_WORKING_DIR, bootstrap_model_path))
    main.bootstrap(ESTIMATOR_WORKING_DIR, bootstrap_model_path)
def selfplay(model_name, readouts=goparams.SP_READOUTS, verbose=1, resign_threshold=0.95):
print("Playing a game with model {}".format(model_name))
model_save_path = os.path.join(MODELS_DIR, model_name)
game_output_dir = os.path.join(SELFPLAY_DIR, model_name)
game_holdout_dir = os.path.join(HOLDOUT_DIR, model_name)
sgf_dir = os.path.join(SGF_DIR, model_name)
main.selfplay(
load_file=model_save_path,
output_dir=game_output_dir,
holdout_dir=game_holdout_dir,
output_sgf=sgf_dir,
readouts=readouts,
holdout_pct=HOLDOUT_PCT,
resign_threshold=resign_threshold,
verbose=verbose,
)
def selfplay_cache_model(network, model_name, readouts=goparams.SP_READOUTS, verbose=1, resign_threshold=0.95):
print("Playing a game with model {}".format(model_name))
game_output_dir = os.path.join(SELFPLAY_DIR, model_name)
game_holdout_dir = os.path.join(HOLDOUT_DIR, model_name)
sgf_dir = os.path.join(SGF_DIR, model_name)
main.selfplay_cache_model(
network=network,
output_dir=game_output_dir,
holdout_dir=game_holdout_dir,
output_sgf=sgf_dir,
readouts=readouts,
holdout_pct=HOLDOUT_PCT,
resign_threshold=resign_threshold,
verbose=verbose,
)
def gather():
print("Gathering game output...")
main.gather(input_directory=SELFPLAY_DIR,
output_directory=TRAINING_CHUNK_DIR)
def train():
model_num, model_name = get_latest_model()
print("Training on gathered game data, initializing from {}".format(model_name))
new_model_name = shipname.generate(model_num + 1)
print("New model will be {}".format(new_model_name))
load_file = os.path.join(MODELS_DIR, model_name)
save_file = os.path.join(MODELS_DIR, new_model_name)
#try:
main.train(ESTIMATOR_WORKING_DIR, TRAINING_CHUNK_DIR, save_file,
generation_num=model_num + 1)
#except:
# print("Got an error training, muddling on...")
# logging.exception("Train error")
def validate(model_num=None, validate_name=None):
""" Runs validate on the directories up to the most recent model, or up to
(but not including) the model specified by `model_num`
"""
if model_num is None:
model_num, model_name = get_latest_model()
else:
model_num = int(model_num)
model_name = get_model(model_num)
# Model N was trained on games up through model N-2, so the validation set
# should only be for models through N-2 as well, thus the (model_num - 1)
# term.
models = list(
filter(lambda num_name: num_name[0] < (model_num - 1), get_models()))
# Run on the most recent 50 generations,
# TODO(brianklee): make this hyperparameter dependency explicit/not hardcoded
holdout_dirs = [os.path.join(HOLDOUT_DIR, pair[1])
for pair in models[-50:]]
main.validate(ESTIMATOR_WORKING_DIR, *holdout_dirs,
checkpoint_name=os.path.join(MODELS_DIR, model_name),
validate_name=validate_name)
def echo():
pass # Flags are echo'd in the ifmain block below.
def selfplay_hook(args):
selfplay(**args)
def selfplay_laod_model(model_name):
load_file = os.path.join(MODELS_DIR, model_name)
network = dual_net.DualNetwork(load_file)
return network
def rl_loop():
"""Run the reinforcement learning loop
This tries to create a realistic way to run the reinforcement learning with
all default parameters.
"""
if goparams.DUMMY_MODEL:
# monkeypatch the hyperparams so that we get a quickly executing network.
dual_net.get_default_hyperparams = lambda **kwargs: {
'k': 8, 'fc_width': 16, 'num_shared_layers': 1, 'l2_strength': 1e-4, 'momentum': 0.9}
dual_net.TRAIN_BATCH_SIZE = 16
dual_net.EXAMPLES_PER_GENERATION = 64
#monkeypatch the shuffle buffer size so we don't spin forever shuffling up positions.
preprocessing.SHUFFLE_BUFFER_SIZE = 1000
_, model_name = get_latest_model()
network = selfplay_laod_model(model_name)
def count_games():
# returns number of games in the selfplay directory
if not os.path.exists(os.path.join(SELFPLAY_DIR, model_name)):
# directory not existing implies no games have been played yet
return 0
return len(glob.glob(os.path.join(SELFPLAY_DIR, model_name, '*.zz')))
while count_games() < goparams.MAX_GAMES_PER_GENERATION:
selfplay_cache_model(network, model_name)
print('Stopping selfplay after finding {} games played.'.format(count_games()))
if __name__ == '__main__':
#tf.logging.set_verbosity(tf.logging.INFO)
seed = int(sys.argv[2])
print('Self play worker: setting random seed = ', seed)
random.seed(seed)
# tf.set_random_seed(seed)
numpy.random.seed(seed)
# get TF logger
log = logging.getLogger('tensorflow')
log.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create file handler which logs even debug messages
fh = logging.FileHandler('tensorflow.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
log.addHandler(fh)
rl_loop() | [
"libbyandhelen@163.com"
] | libbyandhelen@163.com |
953e6c31eaa9ae2ad009a56edbd9f5e7f5e4d829 | 05ad6d839ba95001c8f861a5b1cd619eef8ae9b1 | /tseo/Programmers/level2/42577_전화번호 목록-1.py | 0686a079eb4905d2bdb2f715c4114ce89982efc4 | [] | no_license | Raziel-JKM/ps_study | cdcacdabf14e3236af96d20276459e51a0c09100 | 07602a8af7e23ca3d406ee1db2a5deab01087268 | refs/heads/master | 2023-08-11T12:20:19.830183 | 2021-09-28T23:57:03 | 2021-09-28T23:57:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | def solution(phone_book):
phone_book.sort() # 일단 사전 순으로 정렬해준다
for i in range(len(phone_book) - 1): # 사전순으로 정렬했으니 바로 뒤에것만 체크하면 된다
if phone_book[i] == phone_book[i + 1][: len(phone_book[i])]:
return False
return True
phone_book_1 = ["119", "97674223", "1195524421"]
phone_book_2 = ["123", "456", "789"]
phone_book_3 = ["12", "123", "1235", "567", "88"]
print(solution(phone_book_1))
print(solution(phone_book_2))
print(solution(phone_book_3))
| [
"t1won.seo@gmail.com"
] | t1won.seo@gmail.com |
ed5c57419016c681c918b6ecaadad48709a3df39 | c97ae1cc922a037484c5d4794d0a657561cf47f3 | /migrations/versions/ec38b22c3a6d_init.py | 35756ee88ac5529887eac689e3424ce5aef72002 | [] | no_license | AlenAlic/clubpromoters | 3059078b02b77745e7a1e49d998f9d24554082e8 | f44b3b20c20d5669c1658036cea35fb9a4f223fc | refs/heads/master | 2022-12-11T14:38:37.824769 | 2019-09-08T19:02:49 | 2019-09-08T19:02:49 | 190,430,315 | 0 | 0 | null | 2022-12-09T22:02:49 | 2019-06-05T16:29:25 | JavaScript | UTF-8 | Python | false | false | 5,333 | py | """init
Revision ID: ec38b22c3a6d
Revises:
Create Date: 2019-07-15 15:30:44.791994
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ec38b22c3a6d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('configuration',
sa.Column('lock_id', sa.Integer(), nullable=False),
sa.Column('mollie_api_key', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('lock_id')
)
op.create_table('party',
sa.Column('party_id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=128), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('party_start_datetime', sa.DateTime(), nullable=True),
sa.Column('party_end_datetime', sa.DateTime(), nullable=True),
sa.Column('status', sa.String(length=128), nullable=False),
sa.Column('num_available_tickets', sa.Integer(), nullable=False),
sa.Column('ticket_price', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('party_id')
)
op.create_index(op.f('ix_party_is_active'), 'party', ['is_active'], unique=False)
op.create_index(op.f('ix_party_status'), 'party', ['status'], unique=False)
op.create_index(op.f('ix_party_title'), 'party', ['title'], unique=False)
op.create_table('users',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('reset_index', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('access', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
op.create_index(op.f('ix_users_access'), 'users', ['access'], unique=False)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_is_active'), 'users', ['is_active'], unique=False)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('code',
sa.Column('code_id', sa.Integer(), nullable=False),
sa.Column('code', sa.String(length=8), nullable=False),
sa.Column('active', sa.Boolean(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),
sa.PrimaryKeyConstraint('code_id')
)
op.create_table('purchase',
sa.Column('purchase_id', sa.Integer(), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('status', sa.String(length=128), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('hash', sa.String(length=160), nullable=False),
sa.Column('mollie_payment_id', sa.String(length=128), nullable=False),
sa.Column('purchase_datetime', sa.DateTime(), nullable=True),
sa.Column('party_id', sa.Integer(), nullable=True),
sa.Column('code_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['code_id'], ['code.code_id'], ),
sa.ForeignKeyConstraint(['party_id'], ['party.party_id'], ),
sa.PrimaryKeyConstraint('purchase_id')
)
op.create_index(op.f('ix_purchase_email'), 'purchase', ['email'], unique=False)
op.create_table('refund',
sa.Column('refund_id', sa.Integer(), nullable=False),
sa.Column('price', sa.Integer(), nullable=False),
sa.Column('refund_datetime', sa.DateTime(), nullable=True),
sa.Column('purchase_id', sa.Integer(), nullable=True),
sa.Column('mollie_refund_id', sa.String(length=128), nullable=False),
sa.ForeignKeyConstraint(['purchase_id'], ['purchase.purchase_id'], ),
sa.PrimaryKeyConstraint('refund_id')
)
op.create_table('ticket',
sa.Column('ticket_id', sa.Integer(), nullable=False),
sa.Column('used', sa.Boolean(), nullable=False),
sa.Column('purchase_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['purchase_id'], ['purchase.purchase_id'], ),
sa.PrimaryKeyConstraint('ticket_id')
)
op.create_index(op.f('ix_ticket_used'), 'ticket', ['used'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_ticket_used'), table_name='ticket')
op.drop_table('ticket')
op.drop_table('refund')
op.drop_index(op.f('ix_purchase_email'), table_name='purchase')
op.drop_table('purchase')
op.drop_table('code')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_is_active'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_index(op.f('ix_users_access'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_party_title'), table_name='party')
op.drop_index(op.f('ix_party_status'), table_name='party')
op.drop_index(op.f('ix_party_is_active'), table_name='party')
op.drop_table('party')
op.drop_table('configuration')
# ### end Alembic commands ###
| [
"aalic89@gmail.com"
] | aalic89@gmail.com |
956cfc4a2144a4e3125ff02bd15ddec600b56274 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02416/s904586323.py | 26754ca3eaa61bd6db12d57047898099b49a6a55 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | while(1):
a=int(input())
if a==0:
break
a=list(map(int,list(str(a))))
print(sum(a)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a2e3d46a76ecffe410096ef5678a5b238a26f353 | fee03d6cfbea0803ce0bddb0beb9d447def2a59f | /crypten/mpc/primitives/beaver.py | 21758b97dbfbcf97cc53cf87688751d72b088d06 | [
"MIT"
] | permissive | QQ1230/CrypTen | 548c83a57da8570aeb5f7072e2373e98a2302314 | e11c8bfafee6b1d2ebdc43328c2fb487d48070e3 | refs/heads/master | 2023-06-25T21:30:20.988374 | 2021-07-26T16:24:06 | 2021-07-26T16:25:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,546 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import crypten.communicator as comm
import torch
from crypten.common.util import count_wraps
class IgnoreEncodings:
"""Context Manager to ignore tensor encodings"""
def __init__(self, list_of_tensors):
self.list_of_tensors = list_of_tensors
self.encodings_cache = [tensor.encoder.scale for tensor in list_of_tensors]
def __enter__(self):
for tensor in self.list_of_tensors:
tensor.encoder._scale = 1
def __exit__(self, exc_type, exc_value, exc_traceback):
for i, tensor in enumerate(self.list_of_tensors):
tensor.encoder._scale = self.encodings_cache[i]
def __beaver_protocol(op, x, y, *args, **kwargs):
"""Performs Beaver protocol for additively secret-shared tensors x and y
1. Obtain uniformly random sharings [a],[b] and [c] = [a * b]
2. Additively hide [x] and [y] with appropriately sized [a] and [b]
3. Open ([epsilon] = [x] - [a]) and ([delta] = [y] - [b])
4. Return [z] = [c] + (epsilon * [b]) + ([a] * delta) + (epsilon * delta)
"""
assert op in {
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
}
if x.device != y.device:
raise ValueError(f"x lives on device {x.device} but y on device {y.device}")
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_additive_triple(
x.size(), y.size(), op, device=x.device, *args, **kwargs
)
from .arithmetic import ArithmeticSharedTensor
if crypten.mpc.config.active_security:
"""
Reference: "Multiparty Computation from Somewhat Homomorphic Encryption"
Link: https://eprint.iacr.org/2011/535.pdf
"""
f, g, h = provider.generate_additive_triple(
x.size(), y.size(), op, device=x.device, *args, **kwargs
)
t = ArithmeticSharedTensor.PRSS(a.size(), device=x.device)
t_plain_text = t.get_plain_text()
rho = (t_plain_text * a - f).get_plain_text()
sigma = (b - g).get_plain_text()
triples_check = t_plain_text * c - h - sigma * f - rho * g - rho * sigma
triples_check = triples_check.get_plain_text()
if torch.any(triples_check != 0):
raise ValueError("Beaver Triples verification failed!")
# Vectorized reveal to reduce rounds of communication
with IgnoreEncodings([a, b, x, y]):
epsilon, delta = ArithmeticSharedTensor.reveal_batch([x - a, y - b])
# z = c + (a * delta) + (epsilon * b) + epsilon * delta
c._tensor += getattr(torch, op)(epsilon, b._tensor, *args, **kwargs)
c._tensor += getattr(torch, op)(a._tensor, delta, *args, **kwargs)
c += getattr(torch, op)(epsilon, delta, *args, **kwargs)
return c
def mul(x, y):
return __beaver_protocol("mul", x, y)
def matmul(x, y):
return __beaver_protocol("matmul", x, y)
def conv1d(x, y, **kwargs):
return __beaver_protocol("conv1d", x, y, **kwargs)
def conv2d(x, y, **kwargs):
return __beaver_protocol("conv2d", x, y, **kwargs)
def conv_transpose1d(x, y, **kwargs):
return __beaver_protocol("conv_transpose1d", x, y, **kwargs)
def conv_transpose2d(x, y, **kwargs):
return __beaver_protocol("conv_transpose2d", x, y, **kwargs)
def square(x):
"""Computes the square of `x` for additively secret-shared tensor `x`
1. Obtain uniformly random sharings [r] and [r2] = [r * r]
2. Additively hide [x] with appropriately sized [r]
3. Open ([epsilon] = [x] - [r])
4. Return z = [r2] + 2 * epsilon * [r] + epsilon ** 2
"""
provider = crypten.mpc.get_default_provider()
r, r2 = provider.square(x.size(), device=x.device)
with IgnoreEncodings([x, r]):
epsilon = (x - r).reveal()
return r2 + 2 * r * epsilon + epsilon * epsilon
def wraps(x):
"""Privately computes the number of wraparounds for a set a shares
To do so, we note that:
[theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]
Where [theta_i] is the wraps for a variable i
[beta_ij] is the differential wraps for variables i and j
[eta_ij] is the plaintext wraps for variables i and j
Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
can make the assumption that [eta_xr] = 0 with high probability.
"""
provider = crypten.mpc.get_default_provider()
r, theta_r = provider.wrap_rng(x.size(), device=x.device)
beta_xr = theta_r.clone()
beta_xr._tensor = count_wraps([x._tensor, r._tensor])
with IgnoreEncodings([x, r]):
z = x + r
theta_z = comm.get().gather(z._tensor, 0)
theta_x = beta_xr - theta_r
# TODO: Incorporate eta_xr
if x.rank == 0:
theta_z = count_wraps(theta_z)
theta_x._tensor += theta_z
return theta_x
def AND(x, y):
"""
Performs Beaver protocol for binary secret-shared tensors x and y
1. Obtain uniformly random sharings [a],[b] and [c] = [a & b]
2. XOR hide [x] and [y] with appropriately sized [a] and [b]
3. Open ([epsilon] = [x] ^ [a]) and ([delta] = [y] ^ [b])
4. Return [c] ^ (epsilon & [b]) ^ ([a] & delta) ^ (epsilon & delta)
"""
from .binary import BinarySharedTensor
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_binary_triple(x.size(), y.size(), device=x.device)
# Stack to vectorize reveal
eps_del = BinarySharedTensor.reveal_batch([x ^ a, y ^ b])
epsilon = eps_del[0]
delta = eps_del[1]
return (b & epsilon) ^ (a & delta) ^ (epsilon & delta) ^ c
def B2A_single_bit(xB):
"""Converts a single-bit BinarySharedTensor xB into an
ArithmeticSharedTensor. This is done by:
1. Generate ArithmeticSharedTensor [rA] and BinarySharedTensor =rB= with
a common 1-bit value r.
2. Hide xB with rB and open xB ^ rB
3. If xB ^ rB = 0, then return [rA], otherwise return 1 - [rA]
Note: This is an arithmetic xor of a single bit.
"""
if comm.get().get_world_size() < 2:
from .arithmetic import ArithmeticSharedTensor
return ArithmeticSharedTensor(xB._tensor, precision=0, src=0)
provider = crypten.mpc.get_default_provider()
rA, rB = provider.B2A_rng(xB.size(), device=xB.device)
z = (xB ^ rB).reveal()
rA = rA * (1 - 2 * z) + z
return rA
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
25152339dccd4089309cb91e8af60d00c3605f34 | e2e08d7c97398a42e6554f913ee27340226994d9 | /pyautoTest-master(ICF-7.5.0)/test_case/scg_old/scg_Route/test_c37571.py | 1158f49307441ecff7d1f09b3e73a6a8e5c50136 | [] | no_license | lizhuoya1111/Automated_testing_practice | 88e7be512e831d279324ad710946232377fb4c01 | b3a532d33ddeb8d01fff315bcd59b451befdef23 | refs/heads/master | 2022-12-04T08:19:29.806445 | 2020-08-14T03:51:20 | 2020-08-14T03:51:20 | 287,426,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | import pytest
import time
import sys
from page_obj.scg.scg_def import *
from page_obj.scg.scg_def_log import *
from page_obj.common.rail import *
from os.path import dirname, abspath
from page_obj.scg.scg_def_multi_isp import *
sys.path.insert(0, dirname(dirname(abspath(__file__))))
test_id = 37571
# ISP导入目的ip file文件名包含空格、$#&?>
def test_route_wxw(browser):
try:
login_web(browser, url="10.2.2.82")
add_multi_isp_save_wxw(browser, name='isp571', desc='miaoshu')
import_ip_config_file_wxw(browser, name='isp571', save='yes', cancel='no', file='isp_37571.txt')
time.sleep(1)
alert = browser.find_element_by_xpath('//*[@id="box"]/div[3]/ul/li[2]').text
# print(alert)
del_multi_isp_byname(browser, name='isp571')
try:
assert "导入IP格式错误" in alert
rail_pass(test_run_id, test_id)
except:
rail_fail(test_run_id, test_id)
assert "导入IP格式错误" in alert
except Exception as err:
# 如果上面的步骤有报错,重新设备,恢复配置
reload(hostip="10.2.2.82")
print(err)
rail_fail(test_run_id, test_id)
time.sleep(70)
assert False
if __name__ == '__main__':
pytest.main(["-v", "-s", "test_c"+str(test_id)+".py"]) | [
"15501866985@163.com"
] | 15501866985@163.com |
e29a6774ea3bcc2d0740acaac977a47a5a39885f | f2dde3b0cc30ebce0210fd2e69a6ee0d91274d6f | /semana_09/aula_01/ascii.py | b9988602b70cd063262fb133fcd23786f0ce98b6 | [] | no_license | valeriacavalcanti/IP-2021.1 | 26ecbe187b32d067666e732f7dd918375e6152f8 | 39da35eaceec0a34cccfcc1731ffa9b94b5231f7 | refs/heads/main | 2023-08-06T16:00:09.393942 | 2021-09-27T14:16:07 | 2021-09-27T14:16:07 | 383,632,386 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | print('Símbolos numéricos')
for i in range(48, 58):
print(i, chr(i))
print('Alfabeto maiúsculo')
for i in range(65, 91):
print(i, chr(i))
print('Alfabeto minúsculo')
for i in range(97, 123):
print(i, chr(i))
| [
"valeria.cavalcanti@ifpb.edu.br"
] | valeria.cavalcanti@ifpb.edu.br |
b8d21479535f1a8d006151ded11a93eee5587ff8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2221/60720/297812.py | 8a81748de60eb70d69e1f8fa57deb946c91feb4c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | lst=input().split()
n=int(lst[0])
m=int(lst[1])
gra=[[0 for i in range(n)]for j in range(n)]
for i in range(m):
list0=input().split()
list0=list(map(int,list0))
gra[list0[0]-1][list0[1]-1]=1
for k in range(n):
for i in range(n):
for j in range(n):
if gra[i][k]==1 and gra[k][j]==1:
gra[i][j]=1
count=0
for i in range(n):
isF=False
for j in range(n):
if gra[j][i]==0 and j!=i:
isF=True
break
if not isF:
count+=1
print(count) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
0ea3e9ce3b1bb10704f06fbf994b4bd9adaa1ca4 | 08b6bd207b3b1f8a9238ec07858f3d861a361225 | /sampling500.py | e20e951a7e151922609ce7edf026ffd893dc7b44 | [] | no_license | hhk86/CSI500 | bfad5ad27af638856bd3d1cb86dfb11d989f69fa | fa62ade74cff303070e8fc5eb2eccb5c26aacc28 | refs/heads/master | 2020-09-20T18:29:27.826561 | 2019-12-24T09:21:30 | 2019-12-24T09:21:30 | 224,559,393 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,115 | py | import cx_Oracle
import pandas as pd
import matplotlib.pyplot as plt
import os
import shutil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
class OracleSql(object):
'''
Query data from database
'''
def __init__(self, pt=False):
'''
Initialize database
'''
self.host, self.oracle_port = '18.210.64.72', '1521'
self.db, self.current_schema = 'tdb', 'wind'
self.user, self.pwd = 'reader', 'reader'
self.pt = pt
def __enter__(self):
'''
Connect to database
:return: self
'''
self.conn = self.__connect_to_oracle()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.close()
def __connect_to_oracle(self):
'''
Connect to database
:return: connection
'''
dsn = self.host + ':' + self.oracle_port + '/' + self.db
try:
connection = cx_Oracle.connect(self.user, self.pwd, dsn, encoding="UTF-8", nencoding="UTF-8")
connection.current_schema = self.current_schema
if self.pt is True:
print('Connected to Oracle database successful!')
except Exception:
print('Failed on connecting to Oracle database!')
connection = None
return connection
def query(self, sql: str) -> pd.DataFrame:
'''
Query data
'''
return pd.read_sql(sql, self.conn)
def execute(self, sql: str):
'''
Execute SQL scripts, including inserting and updating
'''
self.conn.cursor().execute(sql)
self.conn.commit()
def lowCaseDfColumns(df: pd.DataFrame) -> pd.DataFrame:
'''
:param df: pd.DataFrame
:return: pd.DataFrame
'''
df.columns = [s.lower() for s in df.columns]
return df
def backtest(startDate: str, endDate: str, filter_param={"minPrice": 3, "maxPrice": 10000}, plot=True):
'''
:param startDate: str, included in the interval
:param endDate: str, included in the interval
:param filter_param: dict, dictionary of filter parameters
:param plot: bool, whether to plot the cumulative return
:return: pd.DataFrame, table of returns of CSI500 and sample
'''
CSI500Returns = getCSI500Returns(startDate, endDate)
sampleReturns = getSampleReturns(startDate, endDate, filter_param=filter_param)
returns_df = pd.concat([CSI500Returns,sampleReturns], axis=1)
returns_df.columns = ["CSI500DailyReturn", "CSI500CumulativeReturn","sampleDailyReturn", "sampleCumulativeReturn"]
period = startDate + '_' + endDate
try:
shutil.rmtree(period)
except:
pass
path = period + '\\'
os.makedirs(path)
returns_df.to_csv(path + "CSI500_vs_sample_" + period + ".csv")
if plot is True:
plotResult(returns_df, period, path)
def getTradingDays(startDate: str, endDate: str) -> list:
sql = \
'''
SELECT
''' + '''
TRADE_DAYS
FROM
asharecalendar
WHERE
S_INFO_EXCHMARKET = 'SSE'
AND trade_days BETWEEN {} AND {}
'''.format(startDate, endDate)
with OracleSql() as oracle:
tradingDays = oracle.query(sql)
return list(tradingDays.TRADE_DAYS)
def getCSI500Returns(startDate: str, endDate: str) -> pd.DataFrame:
'''
:param startDate: str, included in the interval
:param endDate: str, included in the interval
:return: pd.DataFrame, table of returns of CSI500
'''
sql = \
'''
SELECT
''' + '''
TRADE_DT, S_DQ_PCTCHANGE
FROM
AIndexEODPrices
WHERE
S_INFO_WINDCODE = 'h00905.CSI'
AND Trade_dt between {}
AND {}
ORDER BY TRADE_DT
'''.format(startDate, endDate)
with OracleSql() as oracle:
CSI500Returns = oracle.query(sql)
CSI500Returns = lowCaseDfColumns(CSI500Returns)
CSI500Returns.set_index("trade_dt", inplace=True)
CSI500Returns.columns = ["return", ]
CSI500Returns /= 100
CSI500Returns["cumprod"] = (CSI500Returns["return"] + 1).cumprod()
return CSI500Returns
def getSampleReturns(startDate: str, endDate: str, filter_param={"minPrice": 3, "maxPrice": 10000}) -> pd.DataFrame:
'''
:param startDate: str, included in the interval
:param endDate: str, included in the interval
:return: pd.DataFrame, table of returns of sample
'''
global useLocalData
print("start backtesting")
if useLocalData is True:
dailyReturnOfAShare = pd.read_csv("adjustedDailyReturnOfAShare.csv")
dailyReturnOfAShare = lowCaseDfColumns(dailyReturnOfAShare)
dailyReturnOfAShare.trade_dt = dailyReturnOfAShare.trade_dt.astype(str)
sampleReturn_array = pd.Series()
stat = pd.DataFrame(columns = ["NumOfConstituent", "SumOfWeight"])
for date in getTradingDays(startDate, endDate):
oneDayReturnOfAShare = dailyReturnOfAShare[dailyReturnOfAShare["trade_dt"] == date]
oneDaySample = getDailySample(date, filter_param=filter_param)
oneDaySample = pd.merge(oneDaySample, oneDayReturnOfAShare, left_index=True,
right_on="s_info_windcode")
oneDaysampleReturn = oneDaySample.adjustedWeight.mul(oneDaySample.dailyreturn + 1).sum() / 100 - 1
sampleReturn_array[date] = oneDaysampleReturn
stat.loc[date,:] = [len(oneDaySample), oneDaySample.weight.sum()]
print(date)
# print(stat)
# stat.to_csv("stat.csv")
sampleReturns = pd.DataFrame(sampleReturn_array, columns=["return", ])
sampleReturns["cumprod"] = (sampleReturns["return"] + 1).cumprod()
return sampleReturns
def getDailySample(tradeDay: str, filter_param={"minPrice": 3, "maxPrice": 10000}) -> pd.DataFrame:
'''
:param tradeDay: str, "yyyymmdd"
:param filter_param: dict, dictionary of filter parameters
:return: pd.DataFrame
'''
daily500 = getDaily500Stock(tradeDay)
daily500IndustryWeight = daily500.groupby("industriesName")["weight"].sum()
dailySample = daily500[daily500["closeValue"] >= filter_param["minPrice"]]
dailySampleIndustryWeight = dailySample.groupby("industriesName")["weight"].sum()
scale = pd.DataFrame(daily500IndustryWeight.div(dailySampleIndustryWeight))
scale.columns = ["scale", ]
dailySample = pd.merge(dailySample, scale, left_on="industriesName", right_index=True)
dailySample["adjustedWeight"] = dailySample["weight"].mul(dailySample["scale"])
return dailySample
def getDaily500Stock(tradeDay: str) -> pd.DataFrame:
'''
:param tradeday: str, "yyyymmdd"
:return: pd.DataFrame
'''
daily500Data = getDaily500Data(tradeDay)
industryCode = getIndustryCode()
daily500Stock = pd.merge(daily500Data, industryCode, left_index=True, right_index=True)
daily500Stock = daily500Stock[["s_info_name", "industriesname", "weight", "closevalue"]]
daily500Stock.columns = ["stockName", "industriesName", "weight", "closeValue"]
return daily500Stock
def getDaily500Data(tradeDay: str) -> pd.DataFrame:
'''
:param tradday: str, "yyyymmdd"
:param useOracle: bool, get daily data from local or Oracle
:return: pd.DataFrame
'''
global useLocalData
global CSI500WeightData
if useLocalData is True:
daily500Data = CSI500WeightData[CSI500WeightData["trade_dt"] == tradeDay]
daily500Data.set_index(["s_con_windcode", ], inplace=True)
else:
sql = \
'''
SELECT
''' + '''
a.trade_dt,
a.s_con_windcode,
b.S_INFO_NAME,
a.tot_shr,
a.free_shr_ratio,
a.shr_calculation,
a.closevalue,
a.open_adjusted,
a.weight
FROM
aindexcsi500weight a, ASHAREDESCRIPTION b
WHERE
trade_dt = {}
AND a.S_CON_WINDCODE = b.S_INFO_WINDCODE
'''.format(tradeDay)
with OracleSql() as oracle:
daily500Data = oracle.query(sql)
daily500Data = lowCaseDfColumns(daily500Data)
daily500Data.set_index(["s_con_windcode", ], inplace=True)
return daily500Data
def getIndustryCode() -> pd.DataFrame:
'''
Get CITICS industry code
:return: pd.DataFrame
'''
sql_1 = \
'''
SELECT
''' + '''
a.s_info_windcode,
b.Industriesname
FROM
AShareIndustriesClassCITICS a,
AShareIndustriesCode b
WHERE
substr( a.citics_ind_code, 1, 4 ) = substr( b.IndustriesCode, 1, 4 )
AND b.levelnum = '2'
AND a.cur_sign = '1'
ORDER BY
1
'''
sql_2 = \
'''
SELECT
''' + '''
a.s_info_windcode,
b.Industriesname
FROM
AShareIndustriesClassCITICS a,
AShareIndustriesCode b
WHERE
substr( b.IndustriesCode, 1, 4 ) = 'b10m'
AND substr( a.citics_ind_code, 1, 6 ) = substr( b.IndustriesCode, 1, 6 )
AND b.levelnum = '3'
AND a.cur_sign = '1'
ORDER BY
1
'''
with OracleSql() as oracle:
industryCode_1 = oracle.query(sql_1)
industryCode_2 = oracle.query(sql_2)
industryCode_1 = lowCaseDfColumns(industryCode_1)
industryCode_2 = lowCaseDfColumns(industryCode_2)
industryCode = pd.concat(
[industryCode_1[industryCode_1["industriesname"] != "非银行金融"], industryCode_2])
industryCode.columns = ["windcode", "industriesname"]
industryCode.set_index(["windcode", ], drop=True, inplace=True)
return industryCode
def getDateTickLabel(tradeDay_list: list, numOfTicks = 5) -> (list, list):
'''
Give a trade day list and return xticks and xticklabels
:param tradeDay_ls: list
:return: (xticks, xticklabels)
'''
xticks = list()
N = len(tradeDay_list)
interval = N // numOfTicks
for i in range(N):
if i * interval < N:
xticks.append(tradeDay_list[i * interval])
xticklabels = xticks
return (xticks, xticklabels)
def plotResult(returns_df: pd.DataFrame, period: str, path: str):
'''
Plot backtesting result
:param returns_df: pd.DataFrame, df of daily returns and cumulative returns.
'''
plt.plot(returns_df["CSI500CumulativeReturn"])
plt.plot(returns_df["sampleCumulativeReturn"])
xticks, xticklabels = getDateTickLabel(list(returns_df.index))
plt.xticks(xticks, xticklabels)
plt.legend(["CSI500", "sample"])
plt.title("P&L")
plt.savefig(path + "cumulative_return_" + period + ".png")
plt.show()
plt.close()
plt.plot(returns_df["CSI500DailyReturn"])
plt.plot(returns_df["sampleDailyReturn"])
xticks, xticklabels = getDateTickLabel(list(returns_df.index))
plt.xticks(xticks, xticklabels)
plt.legend(["CSI500", "sample"])
plt.title("Daily Return")
plt.savefig(path + "daily_return_" + period + ".png")
plt.show()
plt.close()
plt.plot(returns_df.sampleCumulativeReturn - returns_df.CSI500CumulativeReturn)
xticks, xticklabels = getDateTickLabel(list(returns_df.index))
plt.xticks(xticks, xticklabels)
plt.title("Difference of P&L : sample - CSI500")
plt.savefig(path + "cumulative_diff_" + period + ".png")
plt.show()
plt.close()
plt.plot(returns_df.sampleDailyReturn - returns_df.CSI500DailyReturn)
xticks, xticklabels = getDateTickLabel(list(returns_df.index))
plt.xticks(xticks, xticklabels)
plt.title("Difference of daily return : sample - CSI500")
plt.savefig(path + "daily_return_diff_" + period + ".png")
plt.show()
plt.close()
if __name__ == "__main__":
useLocalData = True
CSI500WeightData = pd.read_csv("CSI500WeightData.csv")
CSI500WeightData = lowCaseDfColumns(CSI500WeightData)
CSI500WeightData.trade_dt = CSI500WeightData.trade_dt.astype(str)
# sampleReturns = backtest("20190101", "20190710")
sampleReturns = backtest("20190615", "20190710")
| [
"hhk0@outlook.com"
] | hhk0@outlook.com |
84079750838c6620e35b81b29ccde5c2c4f1b3cd | 0f9ffc69d45fdbfcb2cfac7b674cf2260efaf11a | /prediction/src/tests/test_crop_patches.py | 24b09006b80edb7d42073d5ab7a2323a388e951c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | aslam/concept-to-clinic | 5835a8f6edb0abcd6aa700ac94878dab804f76ee | b69a6631ad007c5eca5280169c1db96444fd39ff | refs/heads/master | 2021-07-16T22:42:47.041110 | 2017-10-23T21:27:41 | 2017-10-24T12:36:14 | 106,923,326 | 0 | 1 | null | 2017-10-14T11:45:02 | 2017-10-14T11:45:01 | null | UTF-8 | Python | false | false | 656 | py | import pytest
from src.preprocess import load_ct, crop_patches
@pytest.fixture
def ct_path():
return '../images/LUNA-0001/'\
+ '1.3.6.1.4.1.14519.5.2.1.6279.6001.102133688497886810253331438797'
def test_patches_from_ct(ct_path):
centroids = [[556, 101, -70], [556, 121, -20], [556, 221, -77]]
centroids = [{'z': centroid[0], 'y': centroid[1], 'x': centroid[2]} for centroid in centroids]
patches = crop_patches.patches_from_ct(*load_ct.load_ct(ct_path), patch_shape=12, centroids=centroids)
assert isinstance(patches, list)
assert len(patches) == 3
assert all([patch.shape == (12, 12, 12) for patch in patches])
| [
"chris@chris-lamb.co.uk"
] | chris@chris-lamb.co.uk |
6a07c28a16fa5a2def63726f6197bdc76895054f | 46544b5f01eed38d69be41aabe83c6d6089cad52 | /classifier_system/model_training/BERT/bert3.py | 64e889dc04f05420ecf77958a43d8933b265a129 | [] | no_license | eirikdahlen/MSc-Computer-Science-2021 | 042a7c5a5b9bb19567ca301b427c872a209c25ee | bbb1264bbc3305b1357772f4e434ff987ad2c919 | refs/heads/main | 2023-05-15T01:47:13.907667 | 2021-06-05T09:05:32 | 2021-06-05T09:05:32 | 374,064,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,894 | py | import pandas as pd
import numpy as np
import time
from matplotlib import pyplot
import tensorflow as tf
from transformers import BertTokenizerFast, TFBertForSequenceClassification
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import os
def load_and_setup_dataset(filename: str):
df = pd.read_csv(filename)
return df['text'].values.tolist(), df["label"].values.tolist()
def create_test_val_dataset(X, y, train_size: float, test_size=None, random_state: int = 42):
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=train_size, test_size=test_size,
random_state=random_state, stratify=y)
return X_train, X_val, y_train, y_val
def to_categorical_labels(y_train, y_val, y_test, binary: bool = False):
labels_dict = {'unrelated': 0, 'proED': 1, 'prorecovery': 0 if binary else 2}
for i in range(len(y_train)):
y_train[i] = labels_dict[y_train[i]]
for i in range(len(y_val)):
y_val[i] = labels_dict[y_val[i]]
for i in range(len(y_test)):
y_test[i] = labels_dict[y_test[i]]
return np.array(y_train), np.array(y_val), np.array(y_test)
def tokenize(tokenizer, X_train, X_val, X_test, truncation: bool = True, padding: bool = True):
train_encodings = tokenizer(X_train, truncation=truncation, padding=padding)
val_encodings = tokenizer(X_val, truncation=truncation, padding=padding)
test_encodings = tokenizer(X_test, truncation=truncation, padding=padding)
train_encodings = np.array(list(dict(train_encodings).values()))
val_encodings = np.array(list(dict(val_encodings).values()))
test_encodings = np.array(list(dict(test_encodings).values()))
return train_encodings, val_encodings, test_encodings
def train_model(model,
train_encodings,
y_train,
val_encodings,
y_val,
batch_size: int,
learning_rate: float,
epochs: int,
checkpoint_path: str,
save_model_weights: bool = True):
if save_model_weights:
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(optimizer=optimizer,
loss=model.compute_loss,
metrics=['accuracy'])
history = model.fit(
x=train_encodings[0],
y=y_train,
validation_data=(val_encodings[0], y_val),
epochs=epochs,
batch_size=batch_size,
callbacks=[cp_callback] if save_model_weights else None
)
print(history.history)
return model, history
def load_weights(model, cp_dir: str):
latest = tf.train.latest_checkpoint(cp_dir)
model.load_weights(latest)
return model
def predict(model, test_data, test_labels, softmax: bool = False):
print("Performing predictions...")
logits = model.predict(test_data[0])["logits"]
if softmax:
predictions_probabilities = tf.nn.softmax(logits, axis=1)
print(predictions_probabilities)
classes = np.argmax(logits, axis=-1)
score = classification_report(test_labels, classes, digits=3)
print(score)
return predictions_probabilities if softmax else logits
def plot_stats(history, should_show=True):
# plot loss during training
pyplot.figure(1)
pyplot.title('Loss')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='validation')
pyplot.legend()
pyplot.savefig('loss_bert3.png')
# plot accuracy during training
pyplot.figure(2)
pyplot.title('Accuracy')
pyplot.plot(history.history['accuracy'], label='train')
pyplot.plot(history.history['val_accuracy'], label='validation')
pyplot.legend()
pyplot.savefig('acc_bert3.png')
if should_show:
pyplot.show()
def main(args):
start = time.time()
use_idun = args.idun
load_model = args.loadmodel
training_data_path = '/cluster/home/eirida/masteroppgave/Masteroppgave/data/dataset_training.csv' if use_idun else '../../data/dataset_training.csv'
test_data_path = '/cluster/home/eirida/masteroppgave/Masteroppgave/data/dataset_test.csv' if use_idun else '../../data/dataset_test.csv'
X, y = load_and_setup_dataset(training_data_path)
X_test, y_test = load_and_setup_dataset(test_data_path)
X_train, X_val, y_train, y_val = create_test_val_dataset(X, y, train_size=0.95)
y_train, y_val, y_test = to_categorical_labels(y_train, y_val, y_test)
train_encodings, val_encodings, test_encodings = tokenize(
tokenizer=BertTokenizerFast.from_pretrained('bert-base-uncased'),
X_train=X_train,
X_val=X_val,
X_test=X_test
)
model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased',
num_labels=3,
return_dict=True)
checkpoint_path = "bert3_ckpt/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not load_model:
trained_model, history = train_model(model=model,
train_encodings=train_encodings,
y_train=y_train,
val_encodings=val_encodings,
y_val=y_val,
batch_size=16,
learning_rate=2e-5,
epochs=4,
checkpoint_path=checkpoint_path,
save_model_weights=True)
plot_stats(history, should_show=not use_idun)
trained_model.summary()
else:
trained_model = load_weights(model=model, cp_dir=checkpoint_dir)
predictions = predict(model=trained_model,
test_data=test_encodings,
test_labels=y_test,
softmax=False)
print(f"Used {time.time() - start} seconds")
if __name__ == "__main__":
print("Starting run...")
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
print(f"Number of GPUs Avail: {len(tf.config.list_physical_devices('GPU'))}")
print(f"GPU Name: {tf.test.gpu_device_name()}")
print(f"Cuda: {tf.test.is_built_with_cuda()}")
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--idun", default=False, type=bool)
parser.add_argument("--loadmodel", default=False, type=bool)
args = parser.parse_args()
main(args)
| [
"dahleneirik@gmail.com"
] | dahleneirik@gmail.com |
293d7a125b5c351ae3078beeb9539e8d7d5fcc6c | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-cce/huaweicloudsdkcce/v3/model/update_node_response.py | 7b48171cdbec154f61e09ae2d0a43a66df3320fe | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 5,575 | py | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class UpdateNodeResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'kind': 'str',
'api_version': 'str',
'metadata': 'NodeMetadata',
'spec': 'V3NodeSpec',
'status': 'V3NodeStatus'
}
attribute_map = {
'kind': 'kind',
'api_version': 'apiVersion',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, kind=None, api_version=None, metadata=None, spec=None, status=None):
"""UpdateNodeResponse - a model defined in huaweicloud sdk"""
super(UpdateNodeResponse, self).__init__()
self._kind = None
self._api_version = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if kind is not None:
self.kind = kind
if api_version is not None:
self.api_version = api_version
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def kind(self):
"""Gets the kind of this UpdateNodeResponse.
API类型,固定值“Node”,该值不可修改。
:return: The kind of this UpdateNodeResponse.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this UpdateNodeResponse.
API类型,固定值“Node”,该值不可修改。
:param kind: The kind of this UpdateNodeResponse.
:type: str
"""
self._kind = kind
@property
def api_version(self):
"""Gets the api_version of this UpdateNodeResponse.
API版本,固定值“v3”,该值不可修改。
:return: The api_version of this UpdateNodeResponse.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this UpdateNodeResponse.
API版本,固定值“v3”,该值不可修改。
:param api_version: The api_version of this UpdateNodeResponse.
:type: str
"""
self._api_version = api_version
@property
def metadata(self):
"""Gets the metadata of this UpdateNodeResponse.
:return: The metadata of this UpdateNodeResponse.
:rtype: NodeMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this UpdateNodeResponse.
:param metadata: The metadata of this UpdateNodeResponse.
:type: NodeMetadata
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this UpdateNodeResponse.
:return: The spec of this UpdateNodeResponse.
:rtype: V3NodeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this UpdateNodeResponse.
:param spec: The spec of this UpdateNodeResponse.
:type: V3NodeSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this UpdateNodeResponse.
:return: The status of this UpdateNodeResponse.
:rtype: V3NodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this UpdateNodeResponse.
:param status: The status of this UpdateNodeResponse.
:type: V3NodeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateNodeResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
1125c5f8f204bacc054f119057d2ae918dd56e1c | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2686/61132/297976.py | adbfd0fc47f662b3b54e6cb919a6f0d04f66126e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | def mmgp(l,k_2):
if k_2==0 or not l:return 0
ans=[]
for i in range(len(l)):
ans.append((l[i] if k_2%2==1 else -l[i])+max(0,mmgp(l[i+1:],k_2-1)))
return max(0,max(ans))
t = int(input())
for j in range(t):
k=int(input())
m=int(input())
l=list(map(int,input().split()))
print(mmgp(l,k*2)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
7ff3024f8e259f0946690e895e06a842fa367d9c | 898f547bbeb7d1da27bc40e2d594a363c0d1a75a | /Advert of code 2020/day_15_rambunctious_recitation_2.py | d2571e70b1d689a3e7d57b3c97e366efd177ca3b | [] | no_license | TerryLun/Code-Playground | 4e069e28c457309329f003ea249be83d7578a4a3 | 708ad69594cf5b9edc9ff1189716cad70916574c | refs/heads/master | 2023-06-20T14:03:43.924472 | 2021-07-23T05:27:48 | 2021-07-23T05:27:48 | 237,375,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | nums = [6, 13, 1, 15, 2, 0]
end_turn = 30000000
num_dict = {nums[i]: i for i in range(len(nums))}
num_dict.pop(nums[-1])
i = len(nums)
while i != end_turn:
num = nums[-1]
if num not in num_dict:
nums.append(0)
else:
nums.append(i - 1 - num_dict[num])
num_dict[num] = i - 1
i += 1
print(nums[-1])
| [
"tianweilun@yahoo.com"
] | tianweilun@yahoo.com |
53e678355a32e3cece4761413ca5260da6ca75a9 | ac0894b411507bfd027696b6bf11b5e384ed68fc | /need-to-do/python3------download-problem--of--leetcode/740.delete-and-earn.py | cd2f4072af4ce906fcecd6fd076b6305f25f9559 | [] | no_license | mkzpd/leetcode-solution | 1d19554628c34c74012fa52582c225e6dccb345c | 60c9b218683bcdee86477a910c58ec702185c726 | refs/heads/master | 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | #
# @lc app=leetcode id=740 lang=python3
#
# [740] Delete and Earn
#
# https://leetcode.com/problems/delete-and-earn/description/
#
# algorithms
# Medium (46.66%)
# Total Accepted: 25.3K
# Total Submissions: 54.3K
# Testcase Example: '[3,4,2]'
#
# Given an array nums of integers, you can perform operations on the array.
#
# In each operation, you pick any nums[i] and delete it to earn nums[i] points.
# After, you must delete every element equal to nums[i] - 1 or nums[i] + 1.
#
# You start with 0 points. Return the maximum number of points you can earn by
# applying such operations.
#
# Example 1:
#
#
# Input: nums = [3, 4, 2]
# Output: 6
# Explanation:
# Delete 4 to earn 4 points, consequently 3 is also deleted.
# Then, delete 2 to earn 2 points. 6 total points are earned.
#
#
#
#
# Example 2:
#
#
# Input: nums = [2, 2, 3, 3, 3, 4]
# Output: 9
# Explanation:
# Delete 3 to earn 3 points, deleting both 2's and the 4.
# Then, delete 3 again to earn 3 points, and 3 again to earn 3 points.
# 9 total points are earned.
#
#
#
#
# Note:
#
#
# The length of nums is at most 20000.
# Each element nums[i] is an integer in the range [1, 10000].
#
#
#
#
#
class Solution:
def deleteAndEarn(self, nums: List[int]) -> int:
| [
"sodgso262@gmail.com"
] | sodgso262@gmail.com |
7b3b70fb89631556f950e1f7dfb7b72bcd1be568 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/112/17793/submittedfiles/funcoes1.py | 3d6b39846fd6d1dbca5db4e88c86720de38cd906 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # -*- coding: utf-8 -*-
from __future__ import division
n=input('Digite a quantidade de números das listas:')
a=[]
b=[]
c=[]
for i in range (0,n,1):
a.append(input('Digite um valor da lista A:'))
def crescente(lista):
p=0
cont=0
for i in range (0,len(lista)-1,1):
if lista[i]<lista[i+1]:
cont=cont+1
if lista[i]==lista[i+1]:
p=p+1
if cont>0 and p==0:
return True
else:
return False
def decrescente(lista):
d=0
cont2=0
for i in range (0,len(lista)-1,1):
if lista[i]>lista[i+1]:
cont2=cont2+1
if lista[i]==lista[i+1]:
d=d+1
if cont2>0 and d==0:
return True
else:
return False
def elementosiguais(lista):
cont3=0
for i in range (0,len(lista)-1,1):
if lista[i]==lista[i+1] or lista[i]==lista[i-1]:
cont3=cont3+1
if cont3>0:
return True
else:
return False
if crescente(a):
print 'S'
else:
print 'N'
if decrescente(a):
print 'S'
else:
print 'N'
if elementosiguais(a):
print 'S'
else:
print 'N'
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f9b6c72db9ba6a1cccfb7d004703079c2ee42d1d | 2c5073c0140b3366b94866d50f8b975c926a529b | /venv/lib/python3.9/site-packages/mediapipe/calculators/core/concatenate_vector_calculator_pb2.py | 6b016b1af3cf664281fd9ea73b2cb9b9b1874a9c | [] | no_license | geekboi777/Volumegesture | 435c2752d107ac6915919e79bcb63fb0b85f6e9e | 3cc35f74533e26588a606154897f9ded4801f0ce | refs/heads/master | 2023-06-24T19:09:07.138900 | 2021-07-30T23:22:18 | 2021-07-30T23:22:18 | 390,512,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,970 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/calculators/core/concatenate_vector_calculator.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mediapipe.framework import calculator_pb2 as mediapipe_dot_framework_dot_calculator__pb2
try:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe_dot_framework_dot_calculator__options__pb2
except AttributeError:
mediapipe_dot_framework_dot_calculator__options__pb2 = mediapipe_dot_framework_dot_calculator__pb2.mediapipe.framework.calculator_options_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/calculators/core/concatenate_vector_calculator.proto',
package='mediapipe',
syntax='proto2',
serialized_options=b'\242\002\tMediaPipe',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n>mediapipe/calculators/core/concatenate_vector_calculator.proto\x12\tmediapipe\x1a$mediapipe/framework/calculator.proto\"\xaa\x01\n\"ConcatenateVectorCalculatorOptions\x12\'\n\x18only_emit_if_all_present\x18\x01 \x01(\x08:\x05\x66\x61lse2[\n\x03\x65xt\x12\x1c.mediapipe.CalculatorOptions\x18\xcf\xb1\xd8{ \x01(\x0b\x32-.mediapipe.ConcatenateVectorCalculatorOptionsB\x0c\xa2\x02\tMediaPipe'
,
dependencies=[mediapipe_dot_framework_dot_calculator__pb2.DESCRIPTOR,])
_CONCATENATEVECTORCALCULATOROPTIONS = _descriptor.Descriptor(
name='ConcatenateVectorCalculatorOptions',
full_name='mediapipe.ConcatenateVectorCalculatorOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='only_emit_if_all_present', full_name='mediapipe.ConcatenateVectorCalculatorOptions.only_emit_if_all_present', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
_descriptor.FieldDescriptor(
name='ext', full_name='mediapipe.ConcatenateVectorCalculatorOptions.ext', index=0,
number=259397839, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=286,
)
DESCRIPTOR.message_types_by_name['ConcatenateVectorCalculatorOptions'] = _CONCATENATEVECTORCALCULATOROPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConcatenateVectorCalculatorOptions = _reflection.GeneratedProtocolMessageType('ConcatenateVectorCalculatorOptions', (_message.Message,), {
'DESCRIPTOR' : _CONCATENATEVECTORCALCULATOROPTIONS,
'__module__' : 'mediapipe.calculators.core.concatenate_vector_calculator_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.ConcatenateVectorCalculatorOptions)
})
_sym_db.RegisterMessage(ConcatenateVectorCalculatorOptions)
_CONCATENATEVECTORCALCULATOROPTIONS.extensions_by_name['ext'].message_type = _CONCATENATEVECTORCALCULATOROPTIONS
mediapipe_dot_framework_dot_calculator__options__pb2.CalculatorOptions.RegisterExtension(_CONCATENATEVECTORCALCULATOROPTIONS.extensions_by_name['ext'])
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"geekboi777@github.com"
] | geekboi777@github.com |
339cec3cf7dc028d22cebf879426d42bccc5791f | b49a162f6a3d9983d0254f4154a1542774800654 | /dreamrs/dreamrs/urls.py | dfa7198e2a674b3fa2074c07bd35eee300bbcaa8 | [] | no_license | franckeric96/dreamrs_template | 16b254b61938a4acd11e54cdeb98b0a90f9e24e2 | 762999ee73ee9cdea8b3058f417c5aa22de467b0 | refs/heads/master | 2022-10-16T23:32:11.307371 | 2020-06-16T19:41:57 | 2020-06-16T19:41:57 | 272,797,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | """dreamrs URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('website.urls')),
path('blog/', include('blog.urls')),
path('dreamr/', include('dreamr.urls'))
]
if settings.DEBUG :
urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT) | [
"franck@gmail.com"
] | franck@gmail.com |
ccf7a8892ce2951ca0d34da4186b761f109b75ee | 57d67ed3f24279e3da746192a7852870afb8b726 | /utils/writeLog.py | 604fe5cac8e97b66927a597d382ecc70f88ebc04 | [] | no_license | Gaoyang0/BMS | febbdb457a8bc03620069d41d0a9d787d2b00f2f | d4563f41d68a51c1b81eb3d84fc91dcc8d363d69 | refs/heads/master | 2020-03-22T08:38:26.086386 | 2018-07-05T01:31:50 | 2018-07-05T01:31:50 | 139,780,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # -*- coding:utf-8 -*-
# Author:DaoYang
def log(str, type):
if type == 'book':
f = open('log/books.log', 'a', encoding='utf-8')
f.write('\n'+str)
f.close()
elif type == 'user':
f = open('log/users.log', 'a', encoding='utf-8')
f.write('\n' + str)
f.close()
else:
pass | [
"="
] | = |
b3e881b1e0cd83e2b37ae3ce8706c5822dec27c8 | 624155f764a54b78a73c9e830a6b27cd9945a3e9 | /selling1/selling1/doctype/quotation_info/quotation_info.py | c841d16a1b6dbc5387c3576704de3e708d3d261a | [] | no_license | reddymeghraj/selling1 | f94400c9b3fa4c65fdcbae33fbea7f9dbad66dfa | 5a8154699ed824a71948f927ae4ae026854dea3c | refs/heads/master | 2020-05-30T10:24:42.376586 | 2015-04-18T06:12:39 | 2015-04-18T06:12:39 | 33,288,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | # Copyright (c) 2013, Wayzon and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class QuotationInfo(Document):
pass
| [
"reddymeghraj@gmail.com"
] | reddymeghraj@gmail.com |
65bccc73ed6d22219f439804dbfc24af418827a8 | f13acd0d707ea9ab0d2f2f010717b35adcee142f | /AtCoder_Virtual_Contest/green_diff_400_500_points/abc296/d/main.py | 6a57b4cf333b63023933d82e1878c93a8861beba | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | KATO-Hiro/AtCoder | 126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7 | bf43320bc1af606bfbd23c610b3432cddd1806b9 | refs/heads/master | 2023-08-18T20:06:42.876863 | 2023-08-17T23:45:21 | 2023-08-17T23:45:21 | 121,067,516 | 4 | 0 | CC0-1.0 | 2023-09-14T21:59:38 | 2018-02-11T00:32:45 | Python | UTF-8 | Python | false | false | 458 | py | # -*- coding: utf-8 -*-
def main():
import sys
from math import ceil
input = sys.stdin.readline
n, m = map(int, input().split())
inf = 10**30
ans = inf
# a <= bと仮定
for a in range(1, 10**6 + 1):
b = ceil(m / a)
if not (1 <= a <= n) or not (1 <= b <= n):
continue
ans = min(ans, a * b)
if ans == inf:
ans = -1
print(ans)
if __name__ == "__main__":
main()
| [
"k.hiro1818@gmail.com"
] | k.hiro1818@gmail.com |
607042d8083df04551809e6a17ecf223f81a393b | 89284da682f723c6aaad8ef6bba37ac31cd30c92 | /PythonTutorial/Advance/decorator/code/decorator_with_return_value.py | 8ecc0cb2700bb83f49939f1a459ff2a4f2471ac3 | [] | no_license | Danielyan86/Python-Study | 9d9912e0385c5b4d2b7272e9eaca542ff556dc1a | 782c1638eb9733a4be4acbc4c805a78f0fe77546 | refs/heads/master | 2023-03-17T13:26:31.865927 | 2023-03-05T12:30:07 | 2023-03-05T12:30:07 | 26,902,349 | 28 | 25 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def do_twice(func):
def wrapper_do_twice(*args, **kwargs):
func(*args, **kwargs)
return func(*args, **kwargs)
return wrapper_do_twice
@do_twice
def return_greeting(name):
print("Creating greeting")
return f"Hi {name}"
if __name__ == '__main__':
print(return_greeting('adam'))
| [
"516495459@qq.com"
] | 516495459@qq.com |
f3ffa5627fc996ed166688d2332ee06f8182251d | 3cfd6f75fc38ab1d27fd32d18faeb3b17d443a0a | /practice_project/manage.py | a0acbf68c06fe0994606ccb92588f88fc72935ad | [] | no_license | rohinizade27/Django | 6572cd5a5e94482acc1469757792cfadbcef47c1 | 0a3adf5ee3bab308ebf9919a3e032c3c1a61e1a0 | refs/heads/master | 2020-04-11T15:18:34.315560 | 2019-01-14T13:14:17 | 2019-01-14T13:14:17 | 161,887,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "practice_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"rohinizade43@gmail.com"
] | rohinizade43@gmail.com |
90b1ee7827958b4eed671bd126633cbfe34ab5ca | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_17/models/replica_link_performance_replication.py | e11661abb181788226a8aa744f4ad12d88df9607 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,750 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.17
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_17 import models
class ReplicaLinkPerformanceReplication(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_per_sec_from_remote': 'int',
'bytes_per_sec_to_remote': 'int',
'bytes_per_sec_total': 'int',
'direction': 'str',
'local_pod': 'FixedReference',
'remote_pod': 'FixedReference',
'remotes': 'list[FixedReference]',
'time': 'int'
}
attribute_map = {
'bytes_per_sec_from_remote': 'bytes_per_sec_from_remote',
'bytes_per_sec_to_remote': 'bytes_per_sec_to_remote',
'bytes_per_sec_total': 'bytes_per_sec_total',
'direction': 'direction',
'local_pod': 'local_pod',
'remote_pod': 'remote_pod',
'remotes': 'remotes',
'time': 'time'
}
required_args = {
}
def __init__(
self,
bytes_per_sec_from_remote=None, # type: int
bytes_per_sec_to_remote=None, # type: int
bytes_per_sec_total=None, # type: int
direction=None, # type: str
local_pod=None, # type: models.FixedReference
remote_pod=None, # type: models.FixedReference
remotes=None, # type: List[models.FixedReference]
time=None, # type: int
):
"""
Keyword args:
bytes_per_sec_from_remote (int): The number of bytes received per second from a remote array.
bytes_per_sec_to_remote (int): The number of bytes transmitted per second to a remote array.
bytes_per_sec_total (int): Total bytes transmitted and received per second.
direction (str): The direction of replication. Valid values are `inbound` and `outbound`.
local_pod (FixedReference): Reference to a local pod.
remote_pod (FixedReference): Reference to a remote pod.
remotes (list[FixedReference]): Reference to a remote array.
time (int): Sample time in milliseconds since the UNIX epoch.
"""
if bytes_per_sec_from_remote is not None:
self.bytes_per_sec_from_remote = bytes_per_sec_from_remote
if bytes_per_sec_to_remote is not None:
self.bytes_per_sec_to_remote = bytes_per_sec_to_remote
if bytes_per_sec_total is not None:
self.bytes_per_sec_total = bytes_per_sec_total
if direction is not None:
self.direction = direction
if local_pod is not None:
self.local_pod = local_pod
if remote_pod is not None:
self.remote_pod = remote_pod
if remotes is not None:
self.remotes = remotes
if time is not None:
self.time = time
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
if key == "bytes_per_sec_from_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_from_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_to_remote" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_to_remote`, must be a value greater than or equal to `0`")
if key == "bytes_per_sec_total" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_sec_total`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ReplicaLinkPerformanceReplication`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplicaLinkPerformanceReplication, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplicaLinkPerformanceReplication):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"noreply@github.com"
] | PureStorage-OpenConnect.noreply@github.com |
fa4b50adc69c5c509df058204821cc4fad6cfbc3 | fa9500caa931204fffd16634528b475fe0372170 | /translate/translate.py | f617048700d70a9089f7790e50bfb13d13705cf0 | [
"MIT"
] | permissive | monospacedmagic/Trusty-cogs | 7e2c36d1da2055b34ea222894f399054d0697ee4 | 764c9282775575f5791d38b744a5df932acac156 | refs/heads/master | 2020-05-05T07:40:10.787031 | 2019-04-03T22:32:27 | 2019-04-03T22:32:27 | 179,833,321 | 1 | 0 | MIT | 2019-04-06T12:54:59 | 2019-04-06T12:54:59 | null | UTF-8 | Python | false | false | 6,889 | py | import logging
from redbot.core import commands, Config, checks
from redbot.core.i18n import Translator, cog_i18n
from .api import GoogleTranslateAPI, FlagTranslation
from .errors import GoogleTranslateAPIError
"""
Translator cog
Cog credit to aziz#5919 for the idea and
Links
Wiki https://goo.gl/3fxjSA
Github https://goo.gl/oQAQde
Support the developer https://goo.gl/Brchj4
Invite the bot to your guild https://goo.gl/aQm2G7
Join the official development guild https://discord.gg/uekTNPj
"""
BASE_URL = "https://translation.googleapis.com"
_ = Translator("Translate", __file__)
log = logging.getLogger("red.Translate")
@cog_i18n(_)
class Translate(GoogleTranslateAPI, commands.Cog):
"""
Translate messages using google translate
"""
__version__ = "2.0.1"
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, 156434873547, force_registration=True)
default_guild = {
"reaction": False,
"text": False
}
default = {
"api_key": None,
"cooldown": {
"past_flags": [],
"timeout": 0,
"multiple": False,
}
}
self.config.register_guild(**default_guild)
self.config.register_global(**default)
self.cache = {"translations": []}
self.clear_cache = self.bot.loop.create_task(self.cleanup_cache())
@commands.command()
async def translate(self, ctx, to_language: FlagTranslation, *, message: str):
"""
Translate messages with google translate
`to_language` is the language you would like to translate
`message` is the message to translate
"""
if await self.config.api_key() is None:
msg = _("The bot owner needs to set an api key first!")
await ctx.send(msg)
return
try:
detected_lang = await self.detect_language(message)
except GoogleTranslateAPIError as e:
await ctx.send(str(e))
return
from_lang = detected_lang[0][0]["language"]
original_lang = detected_lang[0][0]["language"]
if to_language == original_lang:
return await ctx.send(
_("I cannot translate `{from_lang}` to `{to}`").format(
from_lang=from_lang, to=to_language
)
)
try:
translated_text = await self.translate_text(original_lang, to_language, message)
except GoogleTranslateAPIError as e:
await ctx.send(str(e))
return
author = ctx.message.author
if ctx.channel.permissions_for(ctx.me).embed_links:
translation = (translated_text, from_lang, to_language)
em = await self.translation_embed(author, translation)
await ctx.send(embed=em)
else:
await ctx.send(translated_text)
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
async def translateset(self, ctx):
"""
Toggle the bot auto translating
"""
pass
@translateset.command(aliases=["reaction", "reactions"])
@checks.mod_or_permissions(manage_channels=True)
@commands.guild_only()
async def react(self, ctx):
"""
Toggle translations to flag emoji reactions
"""
guild = ctx.message.guild
toggle = not await self.config.guild(guild).reaction()
if toggle:
verb = _("on")
else:
verb = _("off")
await self.config.guild(guild).reaction.set(toggle)
msg = _("Reaction translations have been turned ")
await ctx.send(msg + verb)
@translateset.command(aliases=["multi"])
@checks.is_owner()
@commands.guild_only()
async def multiple(self, ctx):
"""
Toggle multiple translations for the same message
This will also ignore the translated message from
being translated into another language
"""
toggle = not await self.config.cooldown.multiple()
if toggle:
verb = _("on")
else:
verb = _("off")
await self.config.cooldown.multiple.set(toggle)
msg = _("Multiple translations have been turned ")
await ctx.send(msg + verb)
@translateset.command(aliases=["cooldown"])
@checks.is_owner()
@commands.guild_only()
async def timeout(self, ctx, time: int):
"""
Set the cooldown before a message can be reacted to again
for translation
`<time>` Number of seconds until that message can be reacted to again
Note: If multiple reactions are not allowed the timeout setting
is ignored until the cache cleanup ~10 minutes.
"""
await self.config.cooldown.timeout.set(time)
msg = _("Translation timeout set to {time}s.").format(time=time)
await ctx.send(msg)
@translateset.command(aliases=["flags"])
@checks.mod_or_permissions(manage_channels=True)
@commands.guild_only()
async def flag(self, ctx):
"""
Toggle translations with flag emojis in text
"""
guild = ctx.message.guild
toggle = not await self.config.guild(guild).text()
if toggle:
verb = _("on")
else:
verb = _("off")
await self.config.guild(guild).text.set(toggle)
msg = _("Flag emoji translations have been turned ")
await ctx.send(msg + verb)
@translateset.command()
@checks.is_owner()
async def creds(self, ctx, api_key):
"""
You must get an API key from google to set this up
Note: Using this cog costs money, current rates are $20 per 1 million characters.
1. Go to Google Developers Console and log in with your Google account.
(https://console.developers.google.com/)
2. You should be prompted to create a new project (name does not matter).
3. Click on Enable APIs and Services at the top.
4. In the list of APIs choose or search for Cloud Translate API and click on it.
Choose Enable.
5. Click on Credentials on the left navigation bar.
6. Click on Create Credential at the top.
7. At the top click the link for \"API key\".
8. No application restrictions are needed. Click Create at the bottom.
9. You now have a key to add to `[p]translateset`
"""
await self.config.api_key.set(api_key)
await ctx.send(_("API key set."))
def __unload(self):
self.clear_cache.cancel()
| [
"TrustyJAID@gmail.com"
] | TrustyJAID@gmail.com |
1a4e3c696ae9c091199a668fa86295965e325d04 | 132c7b0c8ba606a249fbdfe24f9d73e7e224d260 | /pages/urls.py | 98943af34a40c86efdabd88a747edabf6f65f189 | [] | no_license | sanyuOnline/sanyu-webapp | dafa3505d7f3d6eca225ca6b4dce3fa683d5e9fe | c8e3824146bb9eb4dcf971a1cdef2bc4475385f1 | refs/heads/main | 2023-08-31T12:52:06.104078 | 2021-10-27T07:03:09 | 2021-10-27T07:03:09 | 406,589,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,761 | py | from django.urls import path
from .views import *
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('about/', AboutView.as_view(), name='about'),
path('contact-us/', ContactView.as_view(), name='contact_us'),
path('projects/', ProjectsView.as_view(), name='projects'),
path('donate/', DonateView.as_view(), name ='donate'),
#projects
path('projects/human-rights/', P1View.as_view(), name ='p1'),
path('projects/rule-of-law/', P2View.as_view(), name ='p2'),
path('projects/advocacy/', P3View.as_view(), name ='p3'),
path('projects/lgbti-rights/', P4View.as_view(), name ='p4'),
path('projects/transitional-justice/', P5View.as_view(), name ='p5'),
path('projects/migrant-rights/', P6View.as_view(), name ='p6'),
path('projects/human-trafficking/', P7View.as_view(), name ='p7'),
path('projects/international-advocacy/', P8View.as_view(), name ='p8'),
path('projects/civic-education/', P9View.as_view(), name ='p9'),
path('projects/civil-society-building/', P10View.as_view(), name ='p10'),
path('projects/community-mobilization/', P11View.as_view(), name ='p11'),
path('projects/local-governance/', P12View.as_view(), name ='p12'),
path('projects/displaced-persons/', P13View.as_view(), name ='p13'),
path('projects/independent-media/', P14View.as_view(), name ='p14'),
path('projects/sme-development/', P15View.as_view(), name ='p15'),
path('projects/local-economic-development/', P16View.as_view(), name ='p16'),
path('projects/agriculture-development/', P17View.as_view(), name ='p17'),
path('projects/womens-rights/', P18View.as_view(), name ='p18'),
path('projects/electoral-processes/', P19View.as_view(), name ='p19'),
] | [
"jordanrob709@gmail.com"
] | jordanrob709@gmail.com |
746d034aa1d78b642514f31c81a889e12510a073 | 5963c12367490ffc01c9905c028d1d5480078dec | /homeassistant/components/dexcom/__init__.py | 68622a2335080cb4aa4533d90e766a20a7c9031c | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 2,550 | py | """The Dexcom integration."""
from datetime import timedelta
import logging
from pydexcom import AccountError, Dexcom, SessionError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_UNIT_OF_MEASUREMENT, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CONF_SERVER,
COORDINATOR,
DOMAIN,
MG_DL,
PLATFORMS,
SERVER_OUS,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=180)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Dexcom from a config entry."""
try:
dexcom = await hass.async_add_executor_job(
Dexcom,
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
entry.data[CONF_SERVER] == SERVER_OUS,
)
except AccountError:
return False
except SessionError as error:
raise ConfigEntryNotReady from error
if not entry.options:
hass.config_entries.async_update_entry(
entry, options={CONF_UNIT_OF_MEASUREMENT: MG_DL}
)
async def async_update_data():
try:
return await hass.async_add_executor_job(dexcom.get_current_glucose_reading)
except SessionError as error:
raise UpdateFailed(error) from error
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR: DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=SCAN_INTERVAL,
),
UNDO_UPDATE_LISTENER: entry.add_update_listener(update_listener),
}
await hass.data[DOMAIN][entry.entry_id][
COORDINATOR
].async_config_entry_first_refresh()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def update_listener(hass, entry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
e9d92ec8c95f8b20f9de59c693ea7978caa23203 | f0c402d3858f0643561886797578b1e64655b1b3 | /utils/builder/shared/ctrl_file_template_strings.py | fa3d11192a6fc72636b8b5a9e473ff13560d179b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Leo-Wang-JL/force-riscv | 39ad2a72abd814df4b63879ce9825b6b06a9391a | deee6acaaee092eb90ac2538de122303334e5be3 | refs/heads/master | 2023-01-28T00:06:58.135651 | 2020-11-18T02:54:10 | 2020-11-18T02:54:10 | 271,873,013 | 0 | 0 | NOASSERTION | 2020-06-28T00:51:26 | 2020-06-12T19:15:26 | C++ | UTF-8 | Python | false | false | 1,102 | py | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#ctrl file template strings
ctrl_items_template = 'control_items = [ '
fname_template = '"fname":"{}*_force.py"'
generator_template = ',"generator":{{{}}}'
options_template = ',"options":{{{}}}'
performance_template = ',"performance":{{{}}}'
#option strings
noiss = '"--noiss":None,'
nosim = '"no-sim":True,'
group = '"group":"{}",'
options = '"--options":"\\"{}\\"",'
#misc strings
ctrl_item_separator ='\n '
#arch specific strings
arch_genopts ='MMU=1,all_cacheable=1'
| [
"jwang1@futurewei.com"
] | jwang1@futurewei.com |
447168b09a1040cc0c9136e96eeca844264de0f3 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /renix_py_api/api_gen/Y1564WriteDbCommand_Autogen.py | 9ed8e710f2aa1c0339673ae3a451c12aa7ad3772 | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,057 | py | """
Auto-generated File
Create Time: 2019-12-27 02:33:28
"""
from .ROMEnum_Autogen import *
from renix_py_api.renix_common_api import *
from renix_py_api import rom_manager
from .BenchmarkWriteDbCommand_Autogen import BenchmarkWriteDbCommand
@rom_manager.rom
class Y1564WriteDbCommand(BenchmarkWriteDbCommand):
def __init__(self, LoadUnit=None, **kwargs):
self._LoadUnit = LoadUnit # Load Unit
properties = kwargs.copy()
if LoadUnit is not None:
properties['LoadUnit'] = LoadUnit
# call base class function, and it will send message to renix server to create a class.
super(Y1564WriteDbCommand, self).__init__(**properties)
@property
def LoadUnit(self):
"""
get the value of property _LoadUnit
"""
return self._LoadUnit
@LoadUnit.setter
def LoadUnit(self, value):
self._LoadUnit = value
def _set_loadunit_with_str(self, value):
seperate = value.find(':')
exec('self._LoadUnit = EnumLoadUnit.%s' % value[:seperate])
| [
"gaoxingyu@example.com"
] | gaoxingyu@example.com |
8c852ac43471e8ad0a70cfc5f47022b6a0c31f66 | 0dce852e5749cf0ece41d167da3107ea6b09bf94 | /pyggi/gtk3__GdkWindowAttr.py | 52bf0e88fe4fbaa2189abeeb8aa410e77ae5078e | [] | no_license | jaykhopale/pywebkit3 | 515b5e89738689bf1e40101e160db053cb5a5d29 | 2046414c34d29e6d495046d764554696dc7c0965 | refs/heads/master | 2021-01-18T03:13:25.073633 | 2012-12-13T18:43:34 | 2012-12-13T18:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,961 | py | # Copyright, John Rusnak, 2012
# This code binding is available under the license agreement of the LGPL with
# an additional constraint described below,
# and with the understanding that the webkit API is copyright protected
# by Apple Computer, Inc. (see below).
# There is an additional constraint that any derivatives of this work aimed
# at providing bindings to GObject, GTK, GDK, or WebKit be strictly
# python-only bindings with no native code.
# * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY
# * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ******************************************************
# For the API:
# /*
# * Copyright (C) 2006 Apple Computer, Inc. All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# */
from ctypes import *
from gtk3_types import *
from gtk3_types import *
"""Derived Pointer Types"""
__GtkRcStyle = POINTER(c_int)
__GdkGeometry = POINTER(c_int)
_WebKitNetworkResponse = POINTER(c_int)
_GdkPixbuf = POINTER(c_int)
__GtkRequisition = POINTER(c_int)
_GtkRcStyle = POINTER(c_int)
__GtkRegionFlags = POINTER(c_int)
__WebKitDOMNode = POINTER(c_int)
_GtkWindow = POINTER(c_int)
__cairo_font_options_t = POINTER(c_int)
__JSValue = POINTER(c_int)
_JSContext = POINTER(c_int)
_GtkIconFactory = POINTER(c_int)
__GdkAtom = POINTER(c_int)
__GdkTimeCoord = POINTER(c_int)
_GdkColor = POINTER(c_int)
__GtkWidgetPath = POINTER(c_int)
__GClosure = POINTER(c_int)
__GMainContext = POINTER(c_int)
_GdkDisplay = POINTER(c_int)
__GtkStyleProvider = POINTER(c_int)
_GtkDialog = POINTER(c_int)
__WebKitWebWindowFeatures = POINTER(c_int)
_JSObject = POINTER(c_int)
_GBytes = POINTER(c_int)
_GScanner = POINTER(c_int)
_PangoFont = POINTER(c_int)
_GtkStyleContext = POINTER(c_int)
_GMainContext = POINTER(c_int)
__GtkTextBuffer = POINTER(c_int)
_GtkTargetList = POINTER(c_int)
__WebKitWebSettings = POINTER(c_int)
_GdkAppLaunchContext = POINTER(c_int)
__GObject = POINTER(c_int)
__PangoLayout = POINTER(c_int)
_WebKitWebBackForwardList = POINTER(c_int)
_GtkOffscreenWindow = POINTER(c_int)
__GParamSpec = POINTER(c_int)
__PangoAttrIterator = POINTER(c_int)
_GtkIconSet = POINTER(c_int)
_GtkSelectionData = POINTER(c_int)
_GtkWindowGroup = POINTER(c_int)
_JSGlobalContext = POINTER(c_int)
_PangoLogAttr = POINTER(c_int)
__PangoContext = POINTER(c_int)
__JSPropertyNameArray = POINTER(c_int)
_WebKitWebSettings = POINTER(c_int)
__PangoFont = POINTER(c_int)
__GtkPathPriorityType = POINTER(c_int)
__JSClass = POINTER(c_int)
__WebKitWebHistoryItem = POINTER(c_int)
_JSValue = POINTER(c_int)
__GtkSettings = POINTER(c_int)
_GSource = POINTER(c_int)
__PangoFontMap = POINTER(c_int)
__JSString = POINTER(c_int)
__PangoAttrList = POINTER(c_int)
_PangoMatrix = POINTER(c_int)
__GSource = POINTER(c_int)
_GtkApplication = POINTER(c_int)
__PangoAnalysis = POINTER(c_int)
__GMutex = POINTER(c_int)
_PangoFontDescription = POINTER(c_int)
__GdkCursor = POINTER(c_int)
_GtkBorder = POINTER(c_int)
_WebKitWebInspector = POINTER(c_int)
_GdkWindowAttr = POINTER(c_int)
_GOptionGroup = POINTER(c_int)
__GScanner = POINTER(c_int)
__GtkWidgetClass = POINTER(c_int)
__GdkEventKey = POINTER(c_int)
__GdkDisplay = POINTER(c_int)
_GtkWidgetPath = POINTER(c_int)
_GdkScreen = POINTER(c_int)
_PangoFontMetrics = POINTER(c_int)
__GCond = POINTER(c_int)
_GtkIconSource = POINTER(c_int)
_GdkVisual = POINTER(c_int)
_PangoFontMap = POINTER(c_int)
_GSList = POINTER(c_int)
_WebKitWebFrame = POINTER(c_int)
_JSString = POINTER(c_int)
_GtkWidget = POINTER(c_int)
__WebKitNetworkRequest = POINTER(c_int)
__GdkWindow = POINTER(c_int)
__PangoFontFamily = POINTER(c_int)
__JSContextGroup = POINTER(c_int)
__GPollFD = POINTER(c_int)
__cairo_region_t = POINTER(c_int)
_PangoFontset = POINTER(c_int)
_GdkWindow = POINTER(c_int)
__PangoFontDescription = POINTER(c_int)
__GtkBorder = POINTER(c_int)
__GError = POINTER(c_int)
__PangoCoverage = POINTER(c_int)
_WebKitViewportAttributes = POINTER(c_int)
_JSClass = POINTER(c_int)
_WebKitWebHistoryItem = POINTER(c_int)
__cairo_t = POINTER(c_int)
__GWeakRef = POINTER(c_int)
__GdkVisual = POINTER(c_int)
__GdkEventButton = POINTER(c_int)
_GdkDevice = POINTER(c_int)
__PangoRectangle = POINTER(c_int)
__GtkAccelGroup = POINTER(c_int)
_GObject = POINTER(c_int)
__GtkIconSource = POINTER(c_int)
__JSContext = POINTER(c_int)
_PangoFontsetSimple = POINTER(c_int)
__GtkAllocation = POINTER(c_int)
__GtkWidget = POINTER(c_int)
_PangoLayoutLine = POINTER(c_int)
__GtkIconSet = POINTER(c_int)
_WebKitWebView = POINTER(c_int)
__PangoTabArray = POINTER(c_int)
_WebKitHitTestResult = POINTER(c_int)
__GValue = POINTER(c_int)
_GdkDeviceManager = POINTER(c_int)
_GdkCursor = POINTER(c_int)
_WebKitDOMDocument = POINTER(c_int)
__PangoMatrix = POINTER(c_int)
__GtkPrintOperation = POINTER(c_int)
_PangoContext = POINTER(c_int)
__GList = POINTER(c_int)
__WebKitWebView = POINTER(c_int)
_WebKitWebWindowFeatures = POINTER(c_int)
_PangoCoverage = POINTER(c_int)
_GParamSpec = POINTER(c_int)
_GList = POINTER(c_int)
__GdkRGBA = POINTER(c_int)
__GTimeVal = POINTER(c_int)
__GSourceFuncs = POINTER(c_int)
__JSPropertyNameAccumulator = POINTER(c_int)
__PangoGlyphString = POINTER(c_int)
__JSGlobalContext = POINTER(c_int)
_WebKitSecurityOrigin = POINTER(c_int)
__GObjectClass = POINTER(c_int)
__GSList = POINTER(c_int)
_PangoAnalysis = POINTER(c_int)
__GdkWindowAttr = POINTER(c_int)
_SoupMessage = POINTER(c_int)
_WebKitWebDataSource = POINTER(c_int)
__GdkColor = POINTER(c_int)
_JSContextGroup = POINTER(c_int)
__GdkRectangle = POINTER(c_int)
__PangoLanguage = POINTER(c_int)
_PangoAttrList = POINTER(c_int)
__gunichar = POINTER(c_int)
__GdkWMDecoration = POINTER(c_int)
__PangoLogAttr = POINTER(c_int)
_PangoLayout = POINTER(c_int)
_JSPropertyNameArray = POINTER(c_int)
__JSObject = POINTER(c_int)
_WebKitWebNavigationAction = POINTER(c_int)
_GtkStyle = POINTER(c_int)
__GParameter = POINTER(c_int)
__GtkStyle = POINTER(c_int)
__GIcon = POINTER(c_int)
__GtkWindow = POINTER(c_int)
_PangoLayoutRun = POINTER(c_int)
__cairo_pattern_t = POINTER(c_int)
__GdkPixbuf = POINTER(c_int)
_WebKitGeolocationPolicyDecision = POINTER(c_int)
_GtkSettings = POINTER(c_int)
__GSourceCallbackFuncs = POINTER(c_int)
__GtkTargetEntry = POINTER(c_int)
__GtkApplication = POINTER(c_int)
_GtkClipboard = POINTER(c_int)
_GByteArray = POINTER(c_int)
__GdkScreen = POINTER(c_int)
_PangoLanguage = POINTER(c_int)
__GdkDevice = POINTER(c_int)
_PangoTabArray = POINTER(c_int)
"""Enumerations"""
PangoStyle = c_int
PangoWeight = c_int
PangoVariant = c_int
PangoStretch = c_int
PangoFontMask = c_int
GtkWidgetHelpType = c_int
GtkTextDirection = c_int
GtkSizeRequestMode = c_int
GtkAlign = c_int
GdkPixbufError = c_int
GdkColorspace = c_int
GdkPixbufAlphaMode = c_int
GtkIconSize = c_int
GdkWindowType = c_int
GdkWindowWindowClass = c_int
GdkWindowHints = c_int
GdkGravity = c_int
GdkWindowEdgeh = c_int
GdkWindowTypeHint = c_int
GdkWindowAttributesType = c_int
GdkFilterReturn = c_int
GdkModifierType = c_int
GdkWMDecoration = c_int
GdkWMFunction = c_int
GdkInputSource = c_int
GdkInputMode = c_int
GdkAxisUse = c_int
GdkDeviceType = c_int
GdkGrabOwnership = c_int
GdkCursorType = c_int
GdkVisualType = c_int
GdkByteOrder = c_int
GtkRcFlags = c_int
GtkRcTokenType = c_int
PangoWrapMode = c_int
PangoEllipsizeMode = c_int
PangoAlignment = c_int
WebKitLoadStatus = c_int
WebKitNavigationResponse = c_int
WebKitWebViewTargetInfo = c_int
WebKitWebViewViewMode = c_int
WebKitEditingBehavior = c_int
GdkInputSource = c_int
GdkInputMode = c_int
GdkAxisUse = c_int
GdkDeviceType = c_int
GdkGrabOwnership = c_int
GtkDialogFlags = c_int
GtkResponseType = c_int
WebKitWebNavigationReason = c_int
PangoWrapMode = c_int
PangoEllipsizeMode = c_int
PangoAlignment = c_int
import gobject__GObject
class GdkWindowAttr( gobject__GObject.GObject):
"""Class GdkWindowAttr Constructors"""
def __init__(self, obj = None):
self._object = obj
"""Methods"""
| [
"jrusnak@sensorplatforms.com"
] | jrusnak@sensorplatforms.com |
2a2dded12863725075730f4df216ca6e3c012465 | 679ce4b323f79b2425976201324c6c1f88b95199 | /Python/Who Pays Who/transactions.py | 3265c13f5f84961078cf90b123beb62dcc0eeead | [] | no_license | abriggs914/Coding_Practice | ff690fb5f145a11f4da144f3882b37f473b10450 | 3afd7c59e0d90f0ef5f6203853e69f853312019b | refs/heads/master | 2023-08-31T04:04:58.048554 | 2023-08-29T13:23:29 | 2023-08-29T13:23:29 | 161,865,421 | 0 | 1 | null | 2022-10-27T08:35:29 | 2018-12-15T03:20:14 | Python | UTF-8 | Python | false | false | 13,587 | py | from utility import *
import datetime
# Shared, mutable person records used throughout the module.  Each is a dict:
#   "LET"  - single-letter identifier used in payer/payee strings
#   "BAL"  - running balance; Payment() debits payers and credits payees
#   "OWES" - settlement amount, filled in by the share calculation below
#   "PMTS" - list of every Payment this person participated in
AVERY = {"LET": "A", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
KRISTEN = {"LET": "K", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
EMILY = {"LET": "E", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
HAYLEY = {"LET": "H", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
MOM = {"LET": "M", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
DAD = {"LET": "D", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
# POT is the shared pool that money is paid into (letter "P").
POT = {"LET": "P", "BAL": 0.0, "OWES": 0.0, "PMTS": []}
def who(name_str):
    """Translate a string of initials into the matching person records.

    Every character of *name_str* is compared against the "LET" field of the
    known person records (including the shared POT).  Matches are collected
    in the order the letters appear; a repeated letter yields the same record
    twice, and unrecognised letters are silently skipped.
    """
    global AVERY, KRISTEN, EMILY, HAYLEY, MOM, DAD, POT
    everyone = (AVERY, KRISTEN, EMILY, HAYLEY, MOM, DAD, POT)
    matched = []
    for letter in name_str:
        for record in everyone:
            if letter == record["LET"]:
                matched.append(record)
                break
    return matched
def adjust_OWES(name_str):
    """Reset each named person's "OWES" field from their current balance.

    For every letter in *name_str*, look up the matching record and set its
    "OWES" to its "BAL".  (The original code branched on POT vs. non-POT, but
    both branches performed the identical assignment, so a single statement
    preserves behavior exactly.)  Returns the updated records in letter order.
    """
    updated = []
    for letter in name_str:
        record = who(letter)[0]
        record.update({"OWES": record["BAL"]})
        updated.append(record)
    return updated
class Payment:
    """A single transfer of money between one or more payers and payees.

    ``payer`` and ``payee`` are letter strings understood by ``who``; a
    multi-letter string splits the amount evenly among those people.
    Constructing a Payment applies its effect immediately: each payer's
    "BAL" is debited, each payee's "BAL" is credited, and the payment is
    appended to every participant's "PMTS" history.
    """

    def __init__(self, desc, payer, payee, amount, date):
        self.desc = desc
        self.payer = payer
        self.payee = payee
        self.amount = amount
        self.date = date
        paying = who(payer)
        receiving = who(payee)
        # Even split on each side of the transfer.
        debit = amount / len(paying)
        credit = amount / len(receiving)
        for person in paying:
            person["BAL"] -= debit
            person["PMTS"].append(self)
        for person in receiving:
            person["BAL"] += credit
            person["PMTS"].append(self)

    def __repr__(self):
        return "{} from {} to {} on {}".format(self.amount, self.payer, self.payee, self.date)
# payments = [
# Payment("A->P", "A", "P", 100, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("H->P", "H", "P", 40, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("K->P", "K", "P", 10, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d"))
# ]
# payments = [
# Payment("A->P", "A", "P", 100, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("H->P", "H", "P", 40, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("K->P", "K", "P", 10, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("K->A", "K", "A", 40, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d"))
# ]
#
# payments = [
# Payment("A->H", "A", "H", 15, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("H->K", "H", "K", 20, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("K->A", "K", "A", 20, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d"))
# ]
#
# Active ledger: the real transactions to settle.  Payer/payee strings hold
# person letters ("P" is the shared pot); constructing each Payment updates
# the participants' balances immediately as a side effect.
payments = [
    Payment("Mother's Day Supper (Wingo's)", "A", "P", 100, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Mother's Day Supper (Wingo's)", "K", "P", 17.58, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Mother's Day Present ()", "E", "P", 115.74, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Hayley paid Emily", "H", "E", 55, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Father's Day Present (Air Fryer)", "K", "P", 170.7, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Father's Day Boating", "A", "E", 40, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Father's Day Boating", "E", "P", 50, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Father's Day Boating", "H", "P", 40, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Father's Day Boating", "E", "A", 10, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
    Payment("Spotify", "K", "P", 180, datetime.datetime.strptime("2021-08-04", "%Y-%m-%d")),
    Payment("Disney+", "A", "P", 89.99, datetime.datetime.strptime("2021-08-04", "%Y-%m-%d"))
]
#
# payments = [
# Payment("Mother's Day Supper (Wingo's)", "AK", "P", 100, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d"))
# ]
#
# payments = [
# Payment("T", "A", "P", 10, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("T", "K", "P", 100, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d")),
# Payment("T", "A", "P", 30, datetime.datetime.strptime("2021-05-24", "%Y-%m-%d"))
# ]
#
# Work out each person's even share of everything paid into the pot, then
# set everyone's "OWES" from that share plus their running balance.
# NOTE(review): each pot payment is divided by len(payment.payer) — the
# number of LETTERS in the payer string — then the running total is divided
# by the number of pot payments and multiplied back below.  Confirm this
# average/un-average round trip (vs. a plain sum) is intentional.
owe_val = 0
payments_to_pot = []
transactors = []
for payment in payments:
    people = who(payment.payer) + who(payment.payee)
    # Collect every distinct person involved in any payment, excluding POT.
    # (Membership compares dicts by value; works because records differ.)
    transactors += [p for p in people if p not in transactors and p != POT]
    if payment.payee == POT["LET"]:
        payments_to_pot.append(payment)
        owe_val += payment.amount / len(payment.payer)
owe_val /= max(1, len(payments_to_pot))
# Undo the averaging to recover the pot total, split evenly per transactor.
share = (owe_val * max(1, len(payments_to_pot))) / max(1, len(transactors))
print("owe_val:{}, over {} transactions to the pot.".format(owe_val, len(payments_to_pot)))
print("Share: {}, over {} transactors.".format(share, len(transactors)))
# A person owes their even share plus whatever their balance already shows
# (BAL is negative when they have paid out); people with no recorded
# payments owe nothing.
for payer in "AKEHMD":
    person = who(payer)[0]
    if person["PMTS"]:
        person["OWES"] = share + person["BAL"]
    else:
        person["OWES"] = 0
    print("Payer: {}\n{}".format(payer, dict_print(person, "Person")))
# The pot's OWES mirrors its balance directly (a negated form,
# -POT["BAL"], was previously used and reverted).
POT["OWES"] = POT["BAL"]
print("Payer: {}\n{}".format(POT["LET"], dict_print(POT, "Person")))
def who_pays_who(name_str):
people_to_check = who(name_str)
people_who_pay = [person for person in people_to_check if person["OWES"] > 0]
people_who_get = [person for person in people_to_check if person["OWES"] < 0]
print("people_who_pay:", people_who_pay)
print("people_who_get:", people_who_get)
first_pass = True
quitt = 0
quittt = 8
pay_pairs = []
# Greedy settlement sweep: repeatedly pair payers with receivers, recording
# transfers in pay_pairs and dropping fully-settled people from all pools.
# NOTE(review): the pools are mutated (remove) while being index-scanned; the
# break statements restart the scan, which appears intentional — confirm
# before restructuring this loop.
while (people_who_pay and people_who_get) and (first_pass or people_to_check):
    # print("fp:", first_pass, ", people_who_check:", "\n\t" + "\n\t".join(list(map(str, people_to_check))))
    # Pool sizes before this sweep; compared afterwards to detect progress.
    beg_a = len(people_who_pay)
    beg_b = len(people_who_get)
    i, j = 0, 0
    while i < len(people_who_pay):
        person_a = people_who_pay[i]
        j = 0
        while j < len(people_who_get):
            person_b = people_who_get[j]
            owe_a = 0 if "OWES" not in person_a else person_a["OWES"]
            owe_b = 0 if "OWES" not in person_b else person_b["OWES"]
            # print("1 OWES_a: {}, OWES_b: {}, owe_a + owe_b: {}".format(owe_a, owe_b, owe_a + owe_b), "COND:", (not first_pass and (owe_a >= owe_b and ((owe_a + owe_b) > 0)) and (owe_a < 0 or owe_b < 0)))
            # Fully settled participants leave their pool and the check list.
            if owe_a == 0:
                people_who_pay.remove(person_a)
                people_to_check.remove(person_a)
                break
            if owe_b == 0:
                people_who_get.remove(person_b)
                people_to_check.remove(person_b)
                break
            if owe_a + owe_b == 0:
                # A owes what B needs. pay each other, exit pool.
                pay_pairs.append((owe_a, person_a, person_b))
                people_who_pay.remove(person_a)
                people_who_get.remove(person_b)
                people_to_check.remove(person_a)
                people_to_check.remove(person_b)
                # print("\tA removing:", person_a)
                # print("\tB removing:", person_b)
                break
            elif not first_pass and (owe_a >= owe_b and ((owe_a + owe_b) > 0)) and (owe_a < 0 or owe_b < 0):
                # A has more to pay than B needs
                # Partial settlement: transfer |owe_b| and adjust both ledgers.
                pay_pairs.append((abs(owe_b), person_a, person_b))
                person_a.update({"BAL": person_a["BAL"] + owe_b, "OWES": person_a["OWES"] + owe_b})
                person_b.update({"BAL": person_b["BAL"] - owe_a, "OWES": person_b["OWES"] + min(abs(owe_a), abs(owe_b))})
                # print("2 OWES_a: {}, OWES_b: {}, owe_a + owe_b: {}".format(person_a["OWES"], person_b["OWES"], person_a["OWES"] + person_b["OWES"]))
                #print("\npeople_who_pay: {}\nperson_a: {}\n".format(people_who_pay, person_a), dict_print(person_b, "REMOVING {}".format(person_b["LET"])))
                if person_b["OWES"] >= 0:
                    people_who_get.remove(person_b)
                    people_to_check.remove(person_b)
                    # print("\tC removing:", person_b)
                    break
                elif person_a["OWES"] <= 0:
                    people_who_pay.remove(person_a)
                    people_to_check.remove(person_a)
                    # print("\tD removing:", person_a)
                    break
                elif len(people_who_get) == 1:
                    # Last remaining receiver: hand over everything A still owes.
                    pay_pairs.append((owe_a, person_a, person_b))
                    person_b.update({"BAL": person_b["BAL"] - owe_a, "OWES": person_b["OWES"] + owe_a})
                    people_to_check.remove(person_a)
                    people_who_pay.remove(person_a)
            # else
            # print("bal_a: {}, bal_b: {}".format(bal_a, bal_b))
            j += 1
        i += 1
    # Progress detection: if neither pool shrank this sweep, clear first_pass
    # so the partial-settlement branch can run; count stalls and bail out
    # with ValueError after `quittt` stalled sweeps (guards against looping).
    aft_a = len(people_who_pay)
    aft_b = len(people_who_get)
    sam_a = beg_a == aft_a
    sam_b = beg_b == aft_b
    # print(dict_print({
    # "beg_a": beg_a,
    # "aft_a": aft_a,
    # "beg_b": beg_b,
    # "aft_b": aft_b,
    # "sam_a": sam_a,
    # "sam_b": sam_b,
    # "first_pass": first_pass,
    # "quitt": quitt,
    # "pay_pairs": pay_pairs,
    # "people_who_pay": people_who_pay,
    # "people_who_get": people_who_get,
    # "not sam_a or not sam_b": (not sam_a or not sam_b)
    # }))
    if sam_a and sam_b:
        first_pass = False
    elif first_pass and (not sam_a or not sam_b):
        # print("people_to_check:", people_to_check)
        quitt += 1
        if quitt == quittt:
            raise ValueError("quitt is {}".format(quittt))
    else:
        first_pass = True
# Report every recorded transfer as "payer -> payee" with the amount.
# NOTE(review): dict_print and money are helpers defined elsewhere in this file.
print("Who pays who:\n{}".format("\n".join([
    dict_print(
        {
            "Amount": money(pr[0]),
            "Payer": pr[1]["LET"],
            "Payee": pr[2]["LET"]
        },
        "{} -> {}".format(pr[1]["LET"], pr[2]["LET"])
    ) for pr in pay_pairs
])))
#
# def who_pays_who(name_str):
# people_to_check = who(name_str)
# people_who_pay = [person for person in people_to_check if person["BAL"] > 0]
# people_who_get = [person for person in people_to_check if person["BAL"] < 0]
# print("people_who_pay:", people_who_pay)
# print("people_who_get:", people_who_get)
# first_pass = True
# quitt = 0
# quittt = 8
# pay_pairs = []
# while (people_who_pay and people_who_get) and (first_pass or people_to_check):
# beg_a = len(people_who_pay)
# beg_b = len(people_who_get)
# i, j = 0, 0
# while i < len(people_who_pay):
# person_a = people_who_pay[i]
# while j < len(people_who_get):
# person_b = people_who_get[j]
# bal_a = person_a["BAL"]
# bal_b = person_b["BAL"]
# owe_a = 0 if "OWES" not in person_a else person_a["OWES"]
# owe_b = 0 if "OWES" not in person_b else person_b["OWES"]
# if bal_a == owe_b:
# pay_pairs.append((bal_a, person_a, person_b))
# people_who_pay.remove(person_a)
# people_who_get.remove(person_b)
# people_to_check.remove(person_a)
# people_to_check.remove(person_b)
# elif not first_pass and bal_a > owe_b:
# pay_pairs.append((owe_b, person_a, person_b))
# person_a["BAL"] -= owe_b
# people_who_get.remove(person_b)
# people_to_check.remove(person_b)
# # else
#
# # print("bal_a: {}, bal_b: {}".format(bal_a, bal_b))
# j += 1
# i += 1
# aft_a = len(people_who_pay)
# aft_b = len(people_who_get)
# sam_a = beg_a == aft_a
# sam_b = beg_b == aft_b
# # print(dict_print({
# # "beg_a": beg_a,
# # "aft_a": aft_a,
# # "beg_b": beg_b,
# # "aft_b": aft_b,
# # "sam_a": sam_a,
# # "sam_b": sam_b,
# # "first_pass": first_pass,
# # "quitt": quitt,
# # "pay_pairs": pay_pairs,
# # "people_who_pay": people_who_pay,
# # "people_who_get": people_who_get,
# # "not sam_a or not sam_b": (not sam_a or not sam_b)
# # }))
# if sam_a and sam_b:
# first_pass = False
# elif first_pass and (not sam_a or not sam_b):
# # print("people_to_check:", people_to_check)
# quitt += 1
# if quitt == quittt:
# raise ValueError("quitt is {}".format(quittt))
# else:
# first_pass = True
#
# print("Who pays who:\n{}".format("\n".join([str(pr) for pr in pay_pairs])))
# who_pays_who("AKEHP")
# Run the settlement routine for these participants — presumably initials
# keyed by "LET"; who_pays_who must be defined earlier in this file (confirm).
who_pays_who("AKEH")
| [
"abriggs1@unb.ca"
] | abriggs1@unb.ca |
d2470fc2125cc86a23962c50483ae31d9ed0dfda | 6c584706e6eab645e11357bde8f393013c69e4c9 | /Ingestão de Dados/Aula 3/gerador_log_web.py | 3f51aeffdcf1a17eef5f714f422ad73ba2f38853 | [] | no_license | charlesartbr/fiap-mba-big-data-data-science | cce1b64c301187a049cd9929d5fafd7e6985503e | de4d8372a7ce26ac8e4556925416e5c9e1932020 | refs/heads/master | 2022-09-05T00:33:21.367281 | 2022-08-09T14:01:28 | 2022-08-09T14:01:28 | 185,289,505 | 0 | 1 | null | 2021-01-15T16:47:00 | 2019-05-07T00:10:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,772 | py | #!/usr/bin/python
import time
import datetime
import random
# Generate a synthetic Apache-style access log forever (Python 2 script).
# One entry is appended roughly every 0.1 s with a synthetic clock that
# advances 30-300 s per entry.
timestr = time.strftime("%Y%m%d-%H%M%S")
f = open('/tmp/access_log','w',0)  # buffering=0: unbuffered (Python 2 only)
ips=["123.221.14.56","16.180.70.237","10.182.189.79","218.193.16.244","198.122.118.164","114.214.178.92","233.192.62.103","244.157.45.12","81.73.150.239","237.43.24.118"]
referers=["-","http://www.casualcyclist.com","http://bestcyclingreviews.com/top_online_shops","http://bleater.com","http://searchengine.com"]
resources=["/handle-bars","/stems","/wheelsets","/forks","/seatposts","/saddles","/shifters","/Store/cart.jsp?productID="]
useragents=["Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1944.0 Safari/537.36","Mozilla/5.0 (Linux; U; Android 2.3.5; en-us; HTC Vision Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1","Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25","Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201","Mozilla/5.0 (Windows NT 5.1; rv:31.0) Gecko/20100101 Firefox/31.0","Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))"]
otime = datetime.datetime(2013,10,10)  # synthetic clock start
while 1:
    time.sleep(0.1)
    increment = datetime.timedelta(seconds=random.randint(30,300))
    otime += increment
    uri = random.choice(resources)
    if uri.find("Store")>0:
        # str() replaces the Python-2 backtick (repr) syntax; identical output for ints.
        uri += str(random.randint(1000,1500))
    ip = random.choice(ips)
    useragent = random.choice(useragents)
    referer = random.choice(referers)
    # Bug fix: use the `ip` chosen above instead of drawing a second,
    # unrelated random address (the original left `ip` unused).
    f.write('%s - - [%s] "GET %s HTTP/1.0" 200 %s "%s" "%s"\n' % (ip,otime.strftime('%d/%b/%Y:%H:%M:%S %z'),uri,random.randint(2000,5000),referer,useragent))
| [
"e-mail@charles.art.br"
] | e-mail@charles.art.br |
e92b785b30219e424c68dd12f50f77ba0a0099e8 | 3784495ba55d26e22302a803861c4ba197fd82c7 | /venv/lib/python3.6/site-packages/nltk/test/unit/test_corpora.py | bce083b9088d1cb8befa7ec22f762c1a1e1b71d6 | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 9,484 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import unittest
from nltk.corpus import (
sinica_treebank,
conll2007,
indian,
cess_cat,
cess_esp,
floresta,
ptb,
udhr,
) # mwa_ppdb
from nltk.compat import python_2_unicode_compatible
from nltk.tree import Tree
from nltk.test.unit.utils import skipIf
class TestUdhr(unittest.TestCase):
    """Smoke-tests the UDHR corpus reader over every fileid."""

    def test_words(self):
        """Every UDHR file must tokenize to a non-empty word list."""
        for name in udhr.fileids():
            try:
                words = list(udhr.words(name))
            except AssertionError:
                # Identify the offending fileid before re-raising.
                print(name)
                raise
            self.assertTrue(words)

    def test_raw_unicode(self):
        """raw() must return decoded text, never raw bytes."""
        for name in udhr.fileids():
            txt = udhr.raw(name)
            assert not isinstance(txt, bytes), name
class TestIndian(unittest.TestCase):
    """Spot-checks the Indian-languages POS corpus (Bangla sample)."""

    def test_words(self):
        words = indian.words()[:3]
        self.assertEqual(words, ['মহিষের', 'সন্তান', ':'])

    def test_tagged_words(self):
        tagged_words = indian.tagged_words()[:3]
        self.assertEqual(
            tagged_words, [('মহিষের', 'NN'), ('সন্তান', 'NN'), (':', 'SYM')]
        )
class TestCess(unittest.TestCase):
    """Spot-checks the CESS Catalan and Spanish treebank readers,
    including correct decoding of accented characters."""

    def test_catalan(self):
        words = cess_cat.words()[:15]
        txt = "El Tribunal_Suprem -Fpa- TS -Fpt- ha confirmat la condemna a quatre anys d' inhabilitació especial"
        self.assertEqual(words, txt.split())
        self.assertEqual(cess_cat.tagged_sents()[0][34][0], "càrrecs")

    def test_esp(self):
        words = cess_esp.words()[:15]
        txt = "El grupo estatal Electricité_de_France -Fpa- EDF -Fpt- anunció hoy , jueves , la compra del"
        self.assertEqual(words, txt.split())
        self.assertEqual(cess_esp.words()[115], "años")
class TestFloresta(unittest.TestCase):
    """Spot-checks the Floresta Portuguese treebank word stream."""

    def test_words(self):
        words = floresta.words()[:10]
        txt = "Um revivalismo refrescante O 7_e_Meio é um ex-libris de a"
        self.assertEqual(words, txt.split())
class TestSinicaTreebank(unittest.TestCase):
    """Spot-checks the Sinica (Chinese) treebank: sentence segmentation
    and parse-tree reconstruction."""

    def test_sents(self):
        first_3_sents = sinica_treebank.sents()[:3]
        self.assertEqual(
            first_3_sents, [['一'], ['友情'], ['嘉珍', '和', '我', '住在', '同一條', '巷子']]
        )

    def test_parsed_sents(self):
        # Sentence 25 has a known small parse; compare the full Tree.
        parsed_sents = sinica_treebank.parsed_sents()[25]
        self.assertEqual(
            parsed_sents,
            Tree(
                'S',
                [
                    Tree('NP', [Tree('Nba', ['嘉珍'])]),
                    Tree('V‧地', [Tree('VA11', ['不停']), Tree('DE', ['的'])]),
                    Tree('VA4', ['哭泣']),
                ],
            ),
        )
class TestCoNLL2007(unittest.TestCase):
    """Spot-checks the CoNLL 2007 dependency treebank reader (Spanish
    training split): raw token stream and dependency-tree conversion."""

    # Reading the CoNLL 2007 Dependency Treebanks

    def test_sents(self):
        sents = conll2007.sents('esp.train')[0]
        self.assertEqual(
            sents[:6], ['El', 'aumento', 'del', 'índice', 'de', 'desempleo']
        )

    def test_parsed_sents(self):
        """First sentence converts to the expected full dependency tree."""
        parsed_sents = conll2007.parsed_sents('esp.train')[0]
        self.assertEqual(
            parsed_sents.tree(),
            Tree(
                'fortaleció',
                [
                    Tree(
                        'aumento',
                        [
                            'El',
                            Tree(
                                'del',
                                [
                                    Tree(
                                        'índice',
                                        [
                                            Tree(
                                                'de',
                                                [Tree('desempleo', ['estadounidense'])],
                                            )
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                    'hoy',
                    'considerablemente',
                    Tree(
                        'al',
                        [
                            Tree(
                                'euro',
                                [
                                    Tree(
                                        'cotizaba',
                                        [
                                            ',',
                                            'que',
                                            Tree('a', [Tree('15.35', ['las', 'GMT'])]),
                                            'se',
                                            Tree(
                                                'en',
                                                [
                                                    Tree(
                                                        'mercado',
                                                        [
                                                            'el',
                                                            Tree('de', ['divisas']),
                                                            Tree('de', ['Fráncfort']),
                                                        ],
                                                    )
                                                ],
                                            ),
                                            Tree('a', ['0,9452_dólares']),
                                            Tree(
                                                'frente_a',
                                                [
                                                    ',',
                                                    Tree(
                                                        '0,9349_dólares',
                                                        [
                                                            'los',
                                                            Tree(
                                                                'de',
                                                                [
                                                                    Tree(
                                                                        'mañana',
                                                                        ['esta'],
                                                                    )
                                                                ],
                                                            ),
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            )
                        ],
                    ),
                    '.',
                ],
            ),
        )
@skipIf(not ptb.fileids(), "A full installation of the Penn Treebank is not available")
class TestPTB(unittest.TestCase):
    """Spot-checks the (licensed, optional) full Penn Treebank reader;
    skipped automatically when the corpus is not installed."""

    def test_fileids(self):
        self.assertEqual(
            ptb.fileids()[:4],
            [
                'BROWN/CF/CF01.MRG',
                'BROWN/CF/CF02.MRG',
                'BROWN/CF/CF03.MRG',
                'BROWN/CF/CF04.MRG',
            ],
        )

    def test_words(self):
        self.assertEqual(
            ptb.words('WSJ/00/WSJ_0003.MRG')[:7],
            ['A', 'form', 'of', 'asbestos', 'once', 'used', '*'],
        )

    def test_tagged_words(self):
        self.assertEqual(
            ptb.tagged_words('WSJ/00/WSJ_0003.MRG')[:3],
            [('A', 'DT'), ('form', 'NN'), ('of', 'IN')],
        )

    def test_categories(self):
        self.assertEqual(
            ptb.categories(),
            [
                'adventure',
                'belles_lettres',
                'fiction',
                'humor',
                'lore',
                'mystery',
                'news',
                'romance',
                'science_fiction',
            ],
        )

    def test_news_fileids(self):
        self.assertEqual(
            ptb.fileids('news')[:3],
            ['WSJ/00/WSJ_0001.MRG', 'WSJ/00/WSJ_0002.MRG', 'WSJ/00/WSJ_0003.MRG'],
        )

    def test_category_words(self):
        self.assertEqual(
            ptb.words(categories=['humor', 'fiction'])[:6],
            ['Thirty-three', 'Scotty', 'did', 'not', 'go', 'back'],
        )
@unittest.skip("Skipping test for mwa_ppdb.")
class TestMWAPPDB(unittest.TestCase):
    """Spot-checks the multi-word-aligned PPDB corpus reader.

    NOTE(review): `mwa_ppdb` is commented out in this module's imports,
    so unskipping this class would raise NameError — restore the import
    before enabling it.
    """

    def test_fileids(self):
        self.assertEqual(
            mwa_ppdb.fileids(), ['ppdb-1.0-xxxl-lexical.extended.synonyms.uniquepairs']
        )

    def test_entries(self):
        self.assertEqual(
            mwa_ppdb.entries()[:10],
            [
                ('10/17/01', '17/10/2001'),
                ('102,70', '102.70'),
                ('13,53', '13.53'),
                ('3.2.5.3.2.1', '3.2.5.3.2.1.'),
                ('53,76', '53.76'),
                ('6.9.5', '6.9.5.'),
                ('7.7.6.3', '7.7.6.3.'),
                ('76,20', '76.20'),
                ('79,85', '79.85'),
                ('93,65', '93.65'),
            ],
        )
# unload corpora
from nltk.corpus import teardown_module
| [
"luis20dr@gmail.com"
] | luis20dr@gmail.com |
e7599f9ae060d3fafce863bf73c9090ed1280934 | 5a7b15eb2a3453475ee70bb56e19a7bb2751db89 | /code/analysis/NOT_USED/quantiles/yStudyTradeoff_Bootstrap_Parallel_OnlyWordForms_BoundedVocab_BinomialTest_Single_UnimodalBoundOnQuantile_BothDirections_NoAssumption_All.py | ccb97759647aaf85ec002925f489aef6d4a151e4 | [] | no_license | m-hahn/memory-surprisal | 8db19bc86ada9c352feb66859f718749623700b6 | 1b3d680836ba87fb9186741a8d4f184fda35b122 | refs/heads/master | 2022-04-30T16:01:39.323884 | 2022-03-25T04:10:12 | 2022-03-25T04:10:12 | 156,466,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | from ud_languages import languages
import subprocess
# ./python27 yStudyTradeoff_Bootstrap_Parallel_OnlyWordForms_BoundedVocab_HistogramsByMem_All.py > ../results/tradeoff/listener-curve-histogram_byMem.tsv
# Python-2 driver (note the `print >>` chevron statements, run via ./python27):
# run the per-language binomial-confidence analysis as subprocesses and
# collect every language's output rows into one combined TSV.
with open("../../results/tradeoff/listener-curve-binomial-confidence-bound-quantile-noAssumption.tsv", "w") as outFile:
    # Header row for the combined table.
    print >> outFile, "\t".join(["Language", "Type", "Position", "LowerConfidenceBound", "Level", "Memory"])
    for language in languages:
        print(language)
        # Each child prints its result rows to stdout; capture and append them.
        print >> outFile, subprocess.check_output(["./python27", "yStudyTradeoff_Bootstrap_Parallel_OnlyWordForms_BoundedVocab_BinomialTest_Single_UnimodalBoundOnQuantile_BothDirections_NoAssumption.py", "--language", language, "--level", "0.001"]).strip()
| [
"mhahn29@gmail.com"
] | mhahn29@gmail.com |
3b15e2c536dd3217d18951bca339e724dc79bc96 | 250b997d715c168315a927e28124cf24c77048c0 | /python3基础/3.Python修炼第三层/day3_预习/文件处理_预习.py | 7df721ad73b651c2138d41d3c9be75f122680a17 | [] | no_license | cuitianfeng/Python | c78077e5dcad01ee5fe44c0aa8b61bbc2fa388cf | 9c9f10f13311116ce0bc60ec128f765ff2ca3078 | refs/heads/master | 2023-01-10T23:25:57.158141 | 2020-11-17T15:39:36 | 2020-11-17T15:39:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,724 | py |
# python基础之文件处理
# http://www.cnblogs.com/linhaifeng/articles/5984922.html
# 文件处理
# 读
# f=open('a.txt','r',encoding='utf-8') #打开文件
# res=f.read() #read 以光标读取文件所有内容
# print(res)
# print('第二次',f.read()) # 第一次read光标已经读到文件末尾,所以光标无法继续读取
# print(f.readline(),end='') #一行一行的读取文件
# print(f.readlines(),end='') #把文件所有行读取出来 组成列表
#,end='' 取消读取文件末尾的换行符
# f.close() #打开文件操作后,如果不关闭 ,会一直占用资源,一定要关闭文件
#close掉的是操作系统的资源 f 变量依然存在,但是不能发起读写操作了
# del f #回收应用程序 中的 f , 并没有关闭操作系统的打开文件
#打开 操作 单个或者多个文件,操作完后 自动 close 文件
# with open('a.txt','r',encoding='utf-8') as f,open('b.txt') as f1:
# pass
# r 文本模式的读,在文件不存在,不会创建新文件
# f=open('a.txt','r',encoding='utf-8') #应用程序指定一个f 变量(对象), 操作系统打开一个文件
# f.read() #应用程序发起读取文件指令,操作系统来操作去硬盘读取内容然后返回给 f
# print(f.read())
# f.readline() #读取一行
# f.readlines() #把所有行取出来,放入列表
# print(f.readable()) #判断文件是否 可读
# print(f.writable()) #判断问价是否 可写
# f.close()
# 写
# f=open('a.txt','w',encoding='utf-8') # w 写 如果文件不存在就创建文件,如果存在就清空文件
# f.write('1111\n') #每次写都是先清空在 重新写
# f.write('2222\n') #每次写都是先清空在 重新写
# f.write('3333\n4444\n') #每次写都是先清空在 重新写
# f.writelines(['a\n','b\n','c\n'])
# f.write()
# f.close()
# w 文本模式的写,文件存在则清空,不存在则创建
# f=open('a.txt','w',encoding='utf-8')
# print(f.readable()) #判断文件是否 可读
# print(f.writable()) #判断问价是否 可写
# f.write('哈哈哈哈\n') #一行 添加
# f.write('哈哈哈哈\n')
#
# f.writelines(['1111\n','2222\n']) #列表添加
# f.close()
# 追加
# a 文本模式的追加,文件存在光标跳到文件末尾,文件不存在创建,
# f=open('b.txt','a',encoding='utf-8')
# # print(f.writable()) #判断问价是否 可写
# # print(f.tell()) #查看文件光标的位置
# f.write('333\n')
# f.write('4444\n')
# f.close()
# r+,w+,a+ 不常用的模式
# rb
# rb 模式即直接从硬盘中读取bytes ,不用指定编码
# f=open('a.txt','rb')
# # print(f.read())
# print(f.read().decode('utf-8'))
# f.close()
# wb
# wb 模式,一定不用指定编码
# f=open('a.txt','wb')
# f.write('你好啊'.encode('utf-8'))
# f.close()
# ab 模式,每次写都要 encode
#回收、关闭文件
# f.close() #打开文件操作后,如果不关闭 ,会一直占用资源,一定要关闭文件
#close掉的是操作系统的资源 f 变量依然存在,但是不能发起读写操作了
# del f #回收应用程序 中的 f , 并没有关闭操作系统打开的文件
# f.close() #关闭操作系统打开的文件
# close之后 f 依然存在
# close之后 f.read 是无法操作的,因为read是往操作系统发请求,而系统已经关闭这个文件
#打开 操作 单个或者多个文件,操作完后 自动 close 文件
# with open('a.txt','r',encoding='utf-8') as f,open('b.txt') as f1:
# pass
# with open('file.txt','w',encoding='utf-8') as f:
# f.write('111\n')
#文本格式以外的文件
# f=open('test.jpg','rb')
# print(f.read())
#
# with open('test.jpg','rb') as read_f,open('test1.jpg','wb') as write_f:
# # write_f.write(read_f.read())
# for line in read_f:
# write_f.write(line)
# 修改文件
# Vim 原理 修改文件 vim就是一次全部读取文件
# import os
# with open('old.txt','r',encoding='utf-8') as read_f,\
# open('.old.txt.swap','w',encoding='utf-8') as write_f:
# msg=read_f.read()
# # print(msg,type(msg))
# msg=msg.replace('alex','SB')
# # print(msg)
# write_f.write(msg)
# os.remove('old.txt')
# os.rename('.old.txt.swap','old.txt')
# 如果文件过大 推荐一行一行的读取
# 换成读取文件时 一行一行读取文件 再修改
# import os
# with open('old.txt','r',encoding='utf-8') as read_f,\
# open('.old.txt.swap','w',encoding='utf-8') as write_f:
# for line in read_f:
# if 'SB' in line:
# line=line.replace('SB','alex')
# write_f.write(line)
# os.remove('old.txt')
# os.rename('.old.txt.swap','old.txt')
# 文件读取 写入列表 转成字典
# l={}
# f = open('a.txt','r',encoding='utf-8')
# u = f.readlines()
# print(u,type(u))
# for i in u:
# i = i.strip()
# print(i)
# # print(i.split(' ')[0])
# # print(i.split(' ')[1])
# l[i.split(' ')[0]]={'金额':i.split(' ')[1]}
# print(l)
# print(l['www']['金额'])
# 脚本传参实现拷贝文件
# import sys
#
# #python3 copy.py source.file target.file
# if len(sys.argv) < 3:
# print('usage:python3 copy.py source.file target.file')
# sys.exit()
#
# #r'D:\python编码\py_s19\day3\old.txt' windows路径问题加r r是原生字符串
# with open(r'%s' %sys.argv[1],'rb') as read_f,\
# open(r'%s' %sys.argv[2],'wb') as write_f:
#
# for line in read_f:
# write_f.write(line)
# 文件其他操作
# f=open('a.txt','r',encoding='utf-8')
# print(f.read(3)) # 读3个字符
# f=open('a.txt','rb')
# print(f.read(3)) # 读3个字节
# print(f.read(3).decode('utf-8')) # 解码读 unicode 3个字节存的中文
# f=open('a.txt','r',encoding='utf-8')
# print(f.read())
# # f.seek(0) # 定义光标位置 重置到0
# f.seek(3)
# print(f.tell()) #以字节显示光标位置
# print(f.read())
# seek有三种移动方式0,1,2,其中1和2必须在b模式下进行,但无论哪种模式,都是以bytes为单位移动的
# 0
# f=open('a.txt','rb')
# print(f.read(3))
# print(f.tell())
# f.seek(3,0)
# print(f.tell())
# print(f.read(3).decode('utf-8'))
# 1
# f=open('a.txt','rb')
# print(f.read(3))
# print(f.tell())
# f.seek(3,1)
# print(f.tell())
# print(f.read().decode('utf-8'))
# 2
# f=open('a.txt','rb')
# f.seek(0,2) #光标移动至末尾
# print(f.tell())
# python3 tail.py -f access.log
# import time
# import sys
#
# with open(r'%s' %sys.argv[2],'rb') as f:
# f.seek(0,2)
#
# while True:
# line=f.readline()
# if line:
# print(line.decode('utf-8'),end='')
# else:
# time.sleep(0.2)
#模拟文件追加
# with open('access.log','a') as f:
# f.write('1111\n')
# truncate是截断文件,所以文件的打开方式必须可写,但是不能用w或w+等方式打开,因为那样直接清空文件了,所以truncate要在r+或a或a+等模式下测试效果
# with open('a.txt','r+',encoding='utf-8') as f:
# f.truncate(9) #以字节 截取 截取从0到9以内
# 直接循环 一行一行读 内存里面只会存一行
# with open('a.txt','r',encoding='utf-8') as f:
# # l=f.readlines()
# # print(l)
# # for line in l:
# # print(line,end='')
# for line in f:
# print(line)
# l=[1,2,3,4,5]
# for index in range(len(l)):
# # print(index)
# print(l[index])
# for itme in l:
# # print(index)
# print(itme)
# Demo of list indexing. Fix: the list has 5 elements (valid indexes 0-4);
# the original `l[7]` raised IndexError.
l = [1, 2, 3, 'a', 'b']
print(l[4], '123')
| [
"zhang.hongyang@mydreamplus.com"
] | zhang.hongyang@mydreamplus.com |
1635473539c093851ee9aa249ab11f4706c48386 | 963676ebcbb74cb40344aba93960ab288b75e4cd | /migrations/versions/e6f60380eb4c_add_post_id_columnin_comment_table.py | 12032564a5da2387ff7e7abee4c9dc8999d72478 | [
"MIT"
] | permissive | Anabella1109/MeBlog | 6f93255b550322861cc020fe686beab85b68ac8f | 4e3425a9558926208ad5ce66f83da55bb944f59e | refs/heads/master | 2020-04-25T23:13:20.525790 | 2019-03-05T06:54:35 | 2019-03-05T06:54:35 | 173,137,672 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | """add post_id columnin comment table
Revision ID: e6f60380eb4c
Revises: a002d432a443
Create Date: 2019-03-02 18:05:32.082839
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e6f60380eb4c'  # this migration's id
down_revision = 'a002d432a443'  # parent revision this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable VARCHAR(255) `image` column to `posts`.

    NOTE(review): the revision message says "add post_id column in comment
    table", but the generated operations touch `posts.image` — verify intent.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('posts', sa.Column('image', sa.String(length=255), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the `image` column from `posts`."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('posts', 'image')
    # ### end Alembic commands ###
| [
"bellaxbx1109@gmail.com"
] | bellaxbx1109@gmail.com |
1ae2355c6ac4a55ad285c5f227692ebf7fea72a8 | e1fada3a9846a5593e3d3d2fdc32b23b832e38b4 | /tests/unit/algorithms/segmentation/adapters/mmseg/test_pipelines.py | d59987c3820cd54ceb70f18d63ed75cf203e1fe8 | [
"Apache-2.0"
] | permissive | GalyaZalesskaya/openvino_training_extensions | fd1ebb189900008b16b85568449e5c62d8edbad5 | 6116639caeff100b06a6c10a96c7e7f5951f20c7 | refs/heads/develop | 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 | Apache-2.0 | 2019-10-28T16:16:27 | 2019-08-15T15:41:59 | Python | UTF-8 | Python | false | false | 4,386 | py | # Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import numpy as np
import PIL.Image
import pytest
from otx.algorithms.segmentation.adapters.mmseg.data.pipelines import (
LoadAnnotationFromOTXDataset,
NDArrayToPILImage,
PILImageToNDArray,
RandomResizedCrop,
RandomSolarization,
)
from otx.api.entities.annotation import (
Annotation,
AnnotationSceneEntity,
AnnotationSceneKind,
)
from otx.api.entities.dataset_item import DatasetItemEntity
from otx.api.entities.image import Image
from otx.api.entities.label import Domain, LabelEntity
from otx.api.entities.scored_label import ScoredLabel
from otx.api.entities.shapes.rectangle import Rectangle
from tests.test_suite.e2e_test_system import e2e_pytest_unit
def label_entity(name="test label") -> LabelEntity:
    """Build a segmentation-domain label with the given display name."""
    return LabelEntity(name=name, domain=Domain.SEGMENTATION)
def dataset_item() -> DatasetItemEntity:
    """Create a test dataset item: random 10x16 RGB image with one
    full-image-box annotation carrying the default test label."""
    image: Image = Image(data=np.random.randint(low=0, high=255, size=(10, 16, 3)))
    annotation: Annotation = Annotation(shape=Rectangle.generate_full_box(), labels=[ScoredLabel(label_entity())])
    annotation_scene: AnnotationSceneEntity = AnnotationSceneEntity(
        annotations=[annotation], kind=AnnotationSceneKind.ANNOTATION
    )
    return DatasetItemEntity(media=image, annotation_scene=annotation_scene)
class TestLoadAnnotationFromOTXDataset:
    """Tests the pipeline step that extracts segmentation ground truth
    from an OTX dataset item."""

    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        # Minimal `results` dict in the mmseg pipeline format.
        self.dataset_item: DatasetItemEntity = dataset_item()
        self.results: dict = {
            "dataset_item": self.dataset_item,
            "ann_info": {"labels": [label_entity("class_1")]},
            "seg_fields": [],
        }
        self.pipeline: LoadAnnotationFromOTXDataset = LoadAnnotationFromOTXDataset()

    @e2e_pytest_unit
    def test_call(self) -> None:
        """The step must add `gt_semantic_seg` and keep the dataset item."""
        loaded_annotations: dict = self.pipeline(self.results)
        assert "gt_semantic_seg" in loaded_annotations
        assert loaded_annotations["dataset_item"] == self.dataset_item
class TestNDArrayToPILImage:
    """Tests the ndarray -> PIL.Image conversion pipeline step."""

    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)}
        self.nd_array_to_pil_image: NDArrayToPILImage = NDArrayToPILImage(keys=["img"])

    @e2e_pytest_unit
    def test_call(self) -> None:
        converted_img: dict = self.nd_array_to_pil_image(self.results)
        assert "img" in converted_img
        assert isinstance(converted_img["img"], PIL.Image.Image)

    @e2e_pytest_unit
    def test_repr(self) -> None:
        assert str(self.nd_array_to_pil_image) == "NDArrayToPILImage"
class TestPILImageToNDArray:
    """Tests the PIL.Image -> ndarray conversion pipeline step."""

    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        self.results: dict = {"img": PIL.Image.new("RGB", (3, 3))}
        self.pil_image_to_nd_array: PILImageToNDArray = PILImageToNDArray(keys=["img"])

    @e2e_pytest_unit
    def test_call(self) -> None:
        converted_array: dict = self.pil_image_to_nd_array(self.results)
        assert "img" in converted_array
        assert isinstance(converted_array["img"], np.ndarray)

    @e2e_pytest_unit
    def test_repr(self) -> None:
        assert str(self.pil_image_to_nd_array) == "PILImageToNDArray"
class TestRandomResizedCrop:
    """Tests that RandomResizedCrop updates `img_shape` to the target size
    while preserving the recorded original shape."""

    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        self.results: dict = {"img": PIL.Image.new("RGB", (10, 16)), "img_shape": (10, 16), "ori_shape": (10, 16)}
        self.random_resized_crop: RandomResizedCrop = RandomResizedCrop((5, 5), (0.5, 1.0))

    @e2e_pytest_unit
    def test_call(self) -> None:
        cropped_img: dict = self.random_resized_crop(self.results)
        assert cropped_img["img_shape"] == (5, 5)
        assert cropped_img["ori_shape"] == (10, 16)
class TestRandomSolarization:
    """Tests RandomSolarization applied unconditionally (p=1.0)."""

    @pytest.fixture(autouse=True)
    def setUp(self) -> None:
        self.results: dict = {"img": np.random.randint(0, 255, (3, 3, 3), dtype=np.uint8)}
        self.random_solarization: RandomSolarization = RandomSolarization(p=1.0)

    @e2e_pytest_unit
    def test_call(self) -> None:
        solarized: dict = self.random_solarization(self.results)
        assert "img" in solarized
        assert isinstance(solarized["img"], np.ndarray)

    @e2e_pytest_unit
    def test_repr(self) -> None:
        assert str(self.random_solarization) == "RandomSolarization"
| [
"noreply@github.com"
] | GalyaZalesskaya.noreply@github.com |
b5d111e5042cf79311a90c8070f79acc4fb07e98 | 49e04eaa863f16f23378d628d8c8da3e2f4328ec | /types.py | 6bbf5e9a8e558f93f9b1391cbcd8965f32cf5817 | [] | no_license | cheery/hindley-milner-tutorial | 5f58af47fff9686f5dfd23130a7bdf97ccae19ed | 40debaa36b2cc2dd82a73cf8580c8e810698afb4 | refs/heads/master | 2021-01-01T18:38:32.396766 | 2013-07-23T14:14:08 | 2013-07-23T14:14:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,551 | py | class Variable(object):
    def __init__(self):
        # A fresh type variable starts uninstantiated; unification sets
        # `instance` to the type (or variable) it gets bound to.
        self.instance = None
class Type(object):
    """A type operator: constructor `name` applied to argument `types`
    (e.g. a function arrow or a zero-argument base type)."""
    def __init__(self, name, types):
        self.name = name
        self.types = types
class Generic(object):
    """A quantified (generalized) type variable, identified by `kind`;
    fresh() replaces it with a new Variable per instantiation."""
    def __init__(self, kind):
        self.kind = kind
def prune(t):
    """Collapse chains of instantiated variables, returning the
    representative type at the end of the chain."""
    if isinstance(t, Variable) and t.instance:
        return prune(t.instance)
    return t
def inside(v, t):
    """Occurs check: report whether `v` appears anywhere within type `t`."""
    if not isinstance(t, Type):
        return v == t
    return any(inside(v, sub) for sub in t.types)
def fresh(t, mappings=None):
    """Instantiate a (possibly generic) type for use at a call site.

    Each Generic is replaced by a fresh Variable; `mappings` memoizes
    kind -> Variable so repeated generics share one fresh variable.
    """
    mappings = {} if mappings is None else mappings
    t = prune(t)
    if isinstance(t, Generic):
        if t.kind not in mappings:
            mappings[t.kind] = Variable()
        return mappings[t.kind]
    if isinstance(t, Variable):
        return t
    if isinstance(t, Type):
        # Rebuild the operator with freshly instantiated arguments.
        return Type(t.name, [fresh(x, mappings) for x in t.types])
    # NOTE(review): falls through (returns None) for unknown node kinds.
def unify_var(v, t):
    """Bind variable `v` to type `t` (no-op when identical).

    The occurs check (`inside`) rejects bindings that would create an
    infinitely recursive type.
    """
    if v != t:
        if inside(v, t):
            raise Exception("recursive unification")
        v.instance = t
def unify_types(a, b):
    """Unify two type operators.

    Names and arities must match; then the argument types are unified
    pairwise. Raises Exception on a constructor or arity mismatch.
    """
    if a.name != b.name or len(a.types) != len(b.types):
        # Fix: the original raise statement was missing its closing
        # parenthesis, which made the whole module a SyntaxError.
        raise Exception(
            "type mismatch %s/%i != %s/%i"
            % (a.name, len(a.types), b.name, len(b.types))
        )
    for p, q in zip(a.types, b.types):
        unify(p, q)
def unify(a, b):
    """Unify two types in place (variables get bound via `instance`).

    Fix: the original used three independent `if` statements, so when both
    sides pruned to Variables it ran unify_var in both directions, setting
    a.instance = b and b.instance = a — a two-variable cycle that made
    prune() loop forever. Dispatching with elif binds exactly once.
    """
    a = prune(a)
    b = prune(b)
    if isinstance(a, Variable):
        unify_var(a, b)
    elif isinstance(b, Variable):
        unify_var(b, a)
    elif isinstance(a, Type) and isinstance(b, Type):
        unify_types(a, b)
###Integer = op('Integer', [])
###Boolean = op('Boolean', [])
| [
"cheery@boxbase.org"
] | cheery@boxbase.org |
f015bf5f10b97ea6b3dda3c8716fabd25c42417d | 3ffdea18c24ed90672cc8414cba6b769d757667d | /tests/test_api.py | a73f70aead6d4354ff8fb63f5dc4ab817ea93f8d | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | msauria/cooler | 0b26a07e3b42b0bddbc9fc15aef18dee65010b9b | d9257919d3aee1bbbe700e8154f9f73354e416cb | refs/heads/master | 2021-01-22T06:18:27.266357 | 2017-05-25T04:15:31 | 2017-05-25T04:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,322 | py | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from scipy import sparse
import numpy as np
import pandas
import h5py
from nose.tools import assert_raises
import cooler.api
import mock
class MockHDF5(dict):
    """Dict-backed stand-in for an h5py Group; the `file` attribute mimics
    the h5py File handle (only `mode` is mocked here; the module-level
    fixture below re-points it at the mock itself)."""
    file = mock.Mock(['mode'])
# ---- Module-level fixture: a fake .cool file --------------------------------
# Random dense upper-triangular 20x20 contact matrix at 100 bp "resolution".
binsize = 100
n_bins = 20
r = sparse.random(n_bins, n_bins, density=1, random_state=1)
r = sparse.triu(r, k=1).tocsr()
# Dense symmetrized version of the sparse upper triangle (for reference).
r_full = r.toarray() + r.toarray().T
# Two 1000-bp chromosomes, each split into ten 100-bp bins.
mock_cooler = MockHDF5({
    'chroms': {
        'name': np.array(['chr1', 'chr2'], dtype='S'),
        'length': np.array([1000, 1000], dtype=np.int32),
    },
    'bins': {
        'chrom': np.array([0,0,0,0,0,0,0,0,0,0,
                           1,1,1,1,1,1,1,1,1,1], dtype=int),
        'start': np.array([0,100,200,300,400,500,600,700,800,900,
                           0,100,200,300,400,500,600,700,800,900],
                          dtype=int),
        'end': np.array([100,200,300,400,500,600,700,800,900,1000,
                         100,200,300,400,500,600,700,800,900,1000],
                        dtype=int),
        'mask': np.array([1,1,1,1,1,1,1,1,1,1,
                          1,1,1,1,1,1,1,1,1,1], dtype=bool),
        'bias': np.array([1,1,1,1,1,1,1,1,1,1,
                          1,1,1,1,1,1,1,1,1,1], dtype=float),
        'E1': np.zeros(20, dtype=float),
    },
    'pixels': {
        'bin1_id': r.tocoo().row,
        'bin2_id': r.indices,
        'count': r.data,
        'mask': np.ones(r.nnz, dtype=bool),
    },
    'indexes': {
        'chrom_offset': np.array([0, 10, 20], dtype=np.int32), # nchroms + 1
        'bin1_offset': r.indptr, # nbins + 1
    },
})
mock_cooler.attrs = {
    'bin-size': binsize,
    'bin-type': 'fixed',
    'nchroms': 2,
    'nbins': n_bins,
    'nnz': r.nnz,
    'metadata': '{}',
}
# Make the mock behave like both an open h5py File and its root group.
mock_cooler.file = mock_cooler
mock_cooler.file.mode = 'r'
mock_cooler.file.filename = 'mock.cool'
mock_cooler.name = '/'
mock_cooler['/'] = mock_cooler
# Chromosome name -> integer id, mirroring the bins 'chrom' column above.
chromID_lookup = pandas.Series({'chr1': 0, 'chr2': 1})
def test_get():
    """cooler.api.get returns the stored chrom-table columns intact."""
    chrom_group = mock_cooler['chroms']
    fetched = cooler.api.get(chrom_group)
    assert np.all(fetched['length'] == chrom_group['length'])
def test_chromtable():
    """cooler.api.chroms exposes the chromosome lengths unchanged."""
    expected_lengths = mock_cooler['chroms']['length']
    chrom_df = cooler.api.chroms(mock_cooler)
    assert np.all(chrom_df['length'] == expected_lengths)
def test_bintable():
    """A bins() slice matches the raw bin-table columns over [lo, hi)."""
    lo, hi = 2, 10
    bin_df = cooler.api.bins(mock_cooler, lo, hi)
    raw_bins = mock_cooler['bins']
    # 'chrom' comes back as names; translate via the module lookup table.
    assert np.all(chromID_lookup[bin_df['chrom']] == raw_bins['chrom'][lo:hi])
    for col in ('start', 'end'):
        assert np.all(bin_df[col] == raw_bins[col][lo:hi])
def test_pixeltable():
    """pixels() slices the pixel table; join=True appends the 4 bin columns
    (chrom/start/end per side) for both bin ids."""
    lo, hi = 2, 10
    table = cooler.api.pixels(mock_cooler, lo, hi, join=False)
    assert np.all(table['bin1_id'] == mock_cooler['pixels']['bin1_id'][lo:hi])
    assert np.all(table['bin2_id'] == mock_cooler['pixels']['bin2_id'][lo:hi])
    # Joined output widens the frame by 4 columns (bin1/bin2 genomic coords).
    table = cooler.api.pixels(mock_cooler, lo, hi, join=True)
    assert table.shape == (hi-lo, len(mock_cooler['pixels']) + 4)
def test_info():
    # TODO: placeholder — info() is not covered yet.
    pass
def test_cooler():
    """End-to-end checks on the Cooler facade: bin fetch, offsets, and
    consistency between dense matrix and pixel-table range queries."""
    c = cooler.Cooler(mock_cooler)
    # bin table
    table = c.bins().fetch('chr1')
    assert np.all(table['start'] == mock_cooler['bins']['start'][0:10])
    assert np.all(table['end'] == mock_cooler['bins']['end'][0:10])
    # offsets
    assert c.offset('chr1') == 0
    assert c.extent('chr1') == (0, 10)
    # 2D range queries as rectangular or triangular
    A1 = np.triu(c.matrix(balance=False).fetch('chr2'))
    df = c.matrix(as_pixels=True, join=False, balance=False).fetch('chr2')
    i0 = c.offset('chr2')
    i, j, v = df['bin1_id'], df['bin2_id'], df['count']
    # Rebuild a dense matrix from the pixel triplets (ids shifted to local).
    mat = sparse.coo_matrix((v, (i-i0, j-i0)), (A1.shape))
    A2 = np.triu(mat.toarray())
    assert np.all(A1 == A2)
def test_annotate():
    """annotate() must accept a materialized bin table, a lazy bin view, or
    a pre-fetched subset, and behave on empty pixel frames."""
    c = cooler.Cooler(mock_cooler)
    # works with full bin table / view or only required bins
    df = c.matrix(as_pixels=True, balance=False).fetch('chr1')
    df1 = cooler.annotate(df, c.bins()[:])
    df2 = cooler.annotate(df, c.bins())
    df3 = cooler.annotate(df, c.bins().fetch('chr1'))
    assert np.all(df1 == df2)
    assert np.all(df1 == df3)
    # works on empty dataframe
    df4 = cooler.annotate(df[0:0], c.bins()[:])
    assert np.all(df4.columns == df3.columns)
    assert len(df4) == 0
| [
"nabdennur@gmail.com"
] | nabdennur@gmail.com |
fe917e43a825ab3b35bc5f39d38ccea00d17fe35 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/cloudscale_ch/cloud/plugins/modules/server_group.py | ce68a6bcdeeb1be48c536fd6592cbd6dac7f9382 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 4,241 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: server_group
short_description: Manages server groups on the cloudscale.ch IaaS service
description:
- Create, update and remove server groups.
author:
- René Moser (@resmo)
- Denis Krienbühl (@href)
version_added: "1.0.0"
options:
name:
description:
- Name of the server group.
- Either I(name) or I(uuid) is required. These options are mutually exclusive.
type: str
uuid:
description:
- UUID of the server group.
- Either I(name) or I(uuid) is required. These options are mutually exclusive.
type: str
type:
description:
- Type of the server group.
default: anti-affinity
type: str
zone:
description:
- Zone slug of the server group (e.g. C(lgp1) or C(rma1)).
type: str
state:
description:
- State of the server group.
choices: [ present, absent ]
default: present
type: str
tags:
description:
- Tags assosiated with the server groups. Set this to C({}) to clear any tags.
type: dict
extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
'''
EXAMPLES = '''
---
- name: Ensure server group exists
cloudscale_ch.cloud.server_group:
name: my-name
type: anti-affinity
api_token: xxxxxx
- name: Ensure server group in a specific zone
cloudscale_ch.cloud.server_group:
name: my-rma-group
type: anti-affinity
zone: lpg1
api_token: xxxxxx
- name: Ensure a server group is absent
cloudscale_ch.cloud.server_group:
name: my-name
state: absent
api_token: xxxxxx
'''
RETURN = '''
---
href:
description: API URL to get details about this server group
returned: if available
type: str
sample: https://api.cloudscale.ch/v1/server-group/cfde831a-4e87-4a75-960f-89b0148aa2cc
uuid:
description: The unique identifier for this server
returned: always
type: str
sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
name:
description: The display name of the server group
returned: always
type: str
sample: load balancers
type:
description: The type the server group
returned: if available
type: str
sample: anti-affinity
zone:
description: The zone of the server group
returned: success
type: dict
sample: { 'slug': 'rma1' }
servers:
description: A list of servers that are part of the server group.
returned: if available
type: list
sample: []
state:
description: State of the server group.
returned: always
type: str
sample: present
tags:
description: Tags assosiated with the server group.
returned: success
type: dict
sample: { 'project': 'my project' }
'''
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.api import (
AnsibleCloudscaleBase,
cloudscale_argument_spec,
)
def main():
    """Module entry point: declare the Ansible interface, then apply the
    requested state ('present' or 'absent') to the server group."""
    # Module options, merged on top of the shared cloudscale API parameters.
    spec = cloudscale_argument_spec()
    spec.update({
        'name': {'type': 'str'},
        'uuid': {'type': 'str'},
        'type': {'type': 'str', 'default': 'anti-affinity'},
        'zone': {'type': 'str'},
        'tags': {'type': 'dict'},
        'state': {'default': 'present', 'choices': ['absent', 'present']},
    })
    ansible_module = AnsibleModule(
        argument_spec=spec,
        required_one_of=(('name', 'uuid'),),
        required_if=(('state', 'present', ('name',),),),
        supports_check_mode=True,
    )
    # Generic cloudscale.ch resource handler configured for server groups.
    server_group = AnsibleCloudscaleBase(
        ansible_module,
        resource_name='server-groups',
        resource_create_param_keys=[
            'name',
            'type',
            'zone',
            'tags',
        ],
        resource_update_param_keys=[
            'name',
            'tags',
        ],
    )
    server_group.query_constraint_keys = [
        'zone',
    ]
    # Dispatch on the requested state and report the result to Ansible.
    if ansible_module.params['state'] == 'absent':
        outcome = server_group.absent()
    else:
        outcome = server_group.present()
    ansible_module.exit_json(**outcome)
if __name__ == '__main__':
    main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
dfeed478a0ba3d78ed18820bc893a7854be0d037 | 87278e84fb4cd4b8dedd4a42cf3a51d48e749ec4 | /ch11/findall1.py | 78647c0739f421f77730c7b484524a8eb6d5d006 | [] | no_license | dykim822/Python | 083a6fb9be51e6cb3725a73ea8184f813f572abc | f6bd67d14e3a3a16934650cff6234e9cbad9ebce | refs/heads/main | 2023-07-09T23:50:05.059533 | 2021-07-28T05:07:35 | 2021-07-28T05:07:35 | 369,375,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import re
# Demo: re.findall vs re.search on the string ' 1234'.
match1 = re.findall('[0-9]', ' 1234') # findall: list of every digit found
print(match1)
match1 = re.findall(r'\s[0-9]', ' 1234') # raw string: \s requires a whitespace right before the digit
print(match1)
match1 = re.search('[0-9]', ' 1234') # search: a Match object for the first digit only
print(match1)
"dykim822@gmail.com"
] | dykim822@gmail.com |
4b1eb2a9839f0bc71f07b4f2671320c5b7184044 | bd55c7d73a95caed5f47b0031264ec05fd6ff60a | /apps/core/migrations/0027_auto_20180609_1100.py | 73a5cbc1c8a09ce2cd89c76ee508f42436010ea0 | [] | no_license | phonehtetpaing/ebdjango | 3c8610e2d96318aff3b1db89480b2f298ad91b57 | 1b77d7662ec2bce9a6377690082a656c8e46608c | refs/heads/main | 2023-06-26T13:14:55.319687 | 2021-07-21T06:04:58 | 2021-07-21T06:04:58 | 381,564,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | # Generated by Django 2.0.5 on 2018-06-09 02:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: introduces VendorEventSettings, removes
    VendorReservationSettings, and re-points several AutoMessage* foreign
    keys with explicit related_names."""
    dependencies = [
        ('core', '0026_auto_20180609_1038'),
    ]
    operations = [
        # New per-branch event settings table (working hours, days off,
        # buffer time, Google Calendar OAuth flag, admin memo).
        migrations.CreateModel(
            name='VendorEventSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('work_start_time', models.TimeField(null=True, verbose_name='work start time')),
                ('work_end_time', models.TimeField(null=True, verbose_name='work end time')),
                ('day_off_csv', models.CharField(max_length=32, null=True, verbose_name='off day')),
                ('buffer_period', models.IntegerField(null=True, verbose_name='buffer minutes')),
                ('is_google_calender_oauth', models.BooleanField(default=0, verbose_name='google_calender_oauth_flg')),
                ('admin_text', models.TextField(null=True, verbose_name='memo for admin')),
                ('regist_dt', models.DateTimeField(auto_now_add=True, null=True, verbose_name='regist datetime')),
                ('update_dt', models.DateTimeField(auto_now=True, null=True, verbose_name='update datetime')),
                ('is_delete', models.BooleanField(default=0, verbose_name='delete flg')),
                ('vendor_branch', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendoreventsettings_vendor_branch', to='core.VendorBranch', verbose_name='vendor_branch')),
            ],
            options={
                'verbose_name': 'VendorEventSettings',
                'permissions': (),
            },
        ),
        # Drop the FK first so VendorReservationSettings can be deleted below.
        migrations.RemoveField(
            model_name='vendorreservationsettings',
            name='vendor_branch',
        ),
        # FK alterations: add explicit related_name on the AutoMessage links.
        migrations.AlterField(
            model_name='automessagecontroller',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagecontroller_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
        migrations.AlterField(
            model_name='automessagehistory',
            name='auto_message_trigger',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagehistory_auto_message_trigger', to='core.AutoMessageTrigger', verbose_name='auto_message_trigger'),
        ),
        migrations.AlterField(
            model_name='automessagetrigger',
            name='auto_message_condition',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='automessagetrigger_auto_message_condition', to='core.AutoMessageCondition', verbose_name='auto_message_condition'),
        ),
        # Superseded by VendorEventSettings above.
        migrations.DeleteModel(
            name='VendorReservationSettings',
        ),
    ]
| [
"phonehtetpaing1221@gmail.com"
] | phonehtetpaing1221@gmail.com |
4f98ac5e7df773be7358e84dce7c4f459007f5ff | e2e1732b6eb1a7a6dfeba76762851ad06eb8e482 | /wangban/wangban/spiders/selecrawlers/selecommander.py | 8a0453a4f7a5a24a1d3c8ce9a7d5aab37f26f86f | [] | no_license | nightqiuhua/bigCrawlers | 551e80d55df492c89ae0e0e0bd70c0e5f873068d | 19b86130c8af057d06014865d150e3d2ed6cc319 | refs/heads/main | 2023-03-23T01:13:26.021850 | 2021-03-03T15:09:28 | 2021-03-03T15:09:28 | 344,165,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,733 | py | # -*- coding: utf-8 -*-
import os
import time
import re
import time
import json
from wangban_utils.mongo_util import MongodbClass
from wangban_utils.mysql_util import MySqlDBClass
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.common.exceptions import NoAlertPresentException
from wangban_utils.redis_util import get_redis_conn
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from wangban_utils.logging_util import update_logging
from modify_func import all_modify_func
from scrapy.utils.project import get_project_settings
from wangban_utils.countitem import CountIitem
SETTINGS = get_project_settings()
class SeleCrawler:
    """Selenium-driven crawler base: fetches announcement listing ("column")
    pages, extracts per-announcement metadata, and persists announcement
    detail pages to MongoDB and MySQL."""
    def __init__(self):
        # Shared infrastructure handles: redis task queue, document stores,
        # logging helper, and a per-site item counter.
        self.redis_conn = get_redis_conn()
        self.check_queue = SETTINGS['URLS_CHECK_TASKS']
        self.mongo_conn = MongodbClass()
        self.mysql_conn = MySqlDBClass()
        self.logging_actor = update_logging()
        self.counter = CountIitem()
    def crawling_column(self,driver,link_dict,worker):
        """Fetch every listing-page entry under link_dict['an_sub_url'].

        Args:
            driver: selenium WebDriver instance.
            link_dict: dict describing the listing task (needs
                'an_sub_url' and 'an_sub').
            worker: site-specific spider providing page helpers.
        Returns:
            elements_list: list of dicts, one per announcement found
            under an_sub_url.
        """
        try:
            driver.get(link_dict['an_sub_url'])
            time.sleep(10)
            service_abled = worker.service_able_check(driver)
            self.waiting_method(driver,worker)
            # Keep dismissing alert dialogs until none remain.
            while self.alert_accept(driver) == True:
                self.alert_accept(driver)
        except Exception as e:
            print('crawling_column error',e)
            raise e
        else:
            total_page = worker.get_totalpage(driver)
            elements_list = self.column_elements(driver,link_dict,worker,total_page)
            return elements_list
    def column_elements(self,driver,link_dict,worker,total_page=1):
        """Walk every page of the listing, collecting one dict per entry."""
        #print('column_elements')
        # Knowing the total page count tells us how many next-page clicks are needed.
        an_sub_origal = link_dict['an_sub']
        column_url = link_dict['an_sub_url']
        elements_list = []
        for i in range(1,int(total_page)+1):
            try:
                self.waiting_method(driver,worker)
            except Exception as e:
                print('no element error',e)
                raise e
            an_refer_url = column_url + worker.post_suf.format(i)
            elements = worker.get_elements(driver)
            for element_value_dict in self.get_element_dict(worker,elements,driver,link_dict['an_sub'],an_sub_origal,an_refer_url,total_page):
                elements_list.append(element_value_dict)
            try:
                worker.click_next_page(driver,page=i)
            except Exception as e:
                # Pagination click failed: reload the column page and clear alerts.
                driver.get(column_url)
                time.sleep(4)
                while self.alert_accept(driver) == True:
                    self.alert_accept(driver)
        return elements_list
    def get_an_info(self,driver,content_task,worker):
        """Fetch one announcement detail page, normalize its fields, and
        persist it to MongoDB (and mirror into MySQL) with logging/counting."""
        an_url_key = content_task['an_url']
        try:
            driver.get(an_url_key)
            time.sleep(5)
        except Exception as e:
            raise e
        while self.alert_accept(driver) == True:
            self.alert_accept(driver)
            time.sleep(1)
        # Enrich the task dict with identity/time/site metadata.
        content_task['_id'] = an_url_key
        content_task['crawling_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        content_task['source_website'] = worker.source_website
        content_task['website_area'] = worker.specific_area
        content_task['specific_area'] = worker.county_modify(content_task)
        content_task['project_title'] = content_task['an_title']
        an_content = worker.get_content(driver)
        try:
            # Site-specific content post-processing, keyed by spider name.
            content = all_modify_func[worker.name](an_content)
        except Exception as e:
            print('func_moc error')
            self.logging_actor.record_error_data(content_task,worker.name)
            raise e
        content_task['an_content'] = content
        success_enabled = self.mongo_conn.insert_into_db(content_task,worker.name)
        if success_enabled:
            self.logging_actor.write_data(content_task,worker.name)
            try:
                # De-duplication/check record plus MySQL mirror of the item.
                check_data = {"_id":content_task["an_url"],"gettime":content_task["crawling_date"],"touch_time":content_task["crawling_date"]}
                self.mongo_conn.insert_into_db(check_data,'check_collections')
                self.mysql_conn.insert_into_db(content_task,'t_zhaobiao')
            except Exception as e:
                raise e
            try:
                self.counter.incr(worker.name)
            except Exception as e:
                # Counting is best-effort; never fail the crawl for it.
                pass
            self.logging_actor.record_data(content_task,worker.name)
    def get_element_dict(self,worker,elements,driver,an_sub,an_sub_origal,an_refer_url,total_page):
        """Yield one metadata dict per listing element (url, title, date, …)."""
        #print('get_element_dict')
        for element in elements:
            element_dict = {}
            element_dict['an_url']=worker.get_elem_url(element,driver)
            element_dict['an_title'] = worker.get_an_title(element,driver)
            element_dict['on_date'] = worker.get_on_date(element,driver)
            element_dict['an_sub'] = worker.get_an_sub(an_sub,element,driver)
            element_dict['an_sub_origal'] = an_sub_origal
            element_dict['an_refer_url'] = an_refer_url
            element_dict['an_refer_total_page'] = total_page
            element_dict['an_county'] = worker.get_an_county(element,driver)
            element_dict['an_ref_page_items'] = len(elements)
            element_dict['type'] ='content'
            yield element_dict
    def waiting_method(self,driver,worker):
        """Block up to 15s until the worker's sentinel XPath is present."""
        try:
            element = WebDriverWait(driver,15).until( # until: keep waiting until the condition holds
                # (until_not is the opposite: wait until the element is gone)
                EC.presence_of_element_located((By.XPATH,worker.presence_elements(driver))) # true once the element exists in the DOM
            )
        except Exception as e:
            print('waiting_method error',e)
            raise e
            #pass
    # Dismiss a JavaScript alert dialog if one is showing.
    def alert_accept(self,driver):
        """Accept the current alert; return True if one was present."""
        # NOTE(review): driver.switch_to_alert() is deprecated in modern
        # selenium (use driver.switch_to.alert) — confirm pinned version.
        try:
            alert = driver.switch_to_alert()
            print("Aler text:" + alert.text)
            alert.accept()
            print("Alert detected, accept it")
            return True
        except UnexpectedAlertPresentException:
            return False
        except NoAlertPresentException:
            return False
class SeleCommander(SeleCrawler):
    """Task dispatcher on top of SeleCrawler: routes 'sub', 'column' and
    'content' tasks and pushes produced sub-tasks onto the redis queue."""
    def __init__(self):
        super().__init__()
    def run(self,driver,link_dict,worker):
        """Dispatch one task dict; enqueue generated follow-up tasks."""
        pipe = self.redis_conn.pipeline(True)
        try:
            driver.delete_all_cookies()
            #for link_key,link_value in link_dict.items():
            # NOTE(review): if link_dict['type'] is none of 'sub'/'column'/
            # 'content', sele_func is never bound and the else branch raises —
            # confirm upstream only produces these three types.
            if link_dict['type'] == 'sub':
                sele_func = self.run_sub_work
            if link_dict['type'] == 'column':
                sele_func = self.run_column_work
            if link_dict['type'] == 'content':
                self.run_an_content_work(driver,link_dict,worker)
            else:
                # 'sub'/'column': collect produced tasks and queue them in one batch.
                for each_item in sele_func(driver,link_dict,worker):
                    input_value = json.dumps(each_item)
                    pipe.lpush(self.check_queue,input_value)
                pipe.execute()
        except Exception as e:
            print('an error occur',e)
            raise e
    def run_sub_work(self,driver,link_dict,worker):
        """Yield column tasks discovered under a 'sub' task, tagged with the spider name."""
        for an_info_item in self.spurncrawler(driver,link_dict,worker):
            an_value = {'name':worker.name}
            an_value.update(an_info_item)
            yield an_value
    def run_column_work(self,driver,column_task,worker):
        """Yield content tasks discovered under a 'column' task."""
        driver.delete_all_cookies()
        #driver,cookies_dict = self.driver_add_cookies(driver)
        # NOTE(review): spurncrawler yields dicts, so this two-name unpacking
        # only succeeds when each yielded dict has exactly two keys — the
        # element dicts built upstream carry many keys; verify this path.
        try:
            for an_url_key,an_url_column_value in self.spurncrawler(driver,column_task,worker):
                an_url_column_value['type'] = 'content'
                an_url_column_value['name'] = worker.name
                yield {an_url_key:an_url_column_value}
        except Exception as e:
            raise e
    def run_an_content_work(self,driver,content_task,worker):
        """Fetch and persist one announcement detail page."""
        try:
            self.get_an_info(driver,content_task,worker)
        except Exception as e:
            raise e
    def spurncrawler(self,driver,link_dict,worker):
        """Crawl the listing and yield its entries enriched with type/major."""
        an_info_list = self.crawling_column(driver,link_dict,worker)
        for an_info in an_info_list:
            an_info['an_type'] = link_dict['an_type']
            an_info['an_major'] = link_dict['an_major']
            yield an_info
"1320551630@qq.com"
] | 1320551630@qq.com |
095109328be0865df86907682c2b5018d6e3f8a2 | 37ca4e22b376193a16fbf0f6140262f1318ca73f | /openapi_client/model/inline_response2006.py | 01ba209696df641c7101182f45cf9e98e8578ee2 | [] | no_license | garethpaul/tmp-twilio-oai-python | de07b7d8e9234a6d6be1e8406afd7ab5e2be4fb4 | 4ae86d8b41fd8d03930d921284f30600290a1d39 | refs/heads/master | 2023-02-26T18:10:55.351844 | 2021-01-29T00:33:30 | 2021-01-29T00:33:30 | 333,993,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,257 | py | """
Twilio - Api
This is the public Twilio REST API. # noqa: E501
The version of the OpenAPI document: 1.8.0
Contact: support@twilio.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Deferred model import, injected into module globals on first use.

    Generated-code pattern: importing the referenced model lazily instead of
    at module top level — presumably to avoid circular imports between
    generated model modules (TODO confirm against the generator's output).
    """
    from openapi_client.model.api_v2010_account_available_phone_number_country_available_phone_number_local import ApiV2010AccountAvailablePhoneNumberCountryAvailablePhoneNumberLocal
    globals()['ApiV2010AccountAvailablePhoneNumberCountryAvailablePhoneNumberLocal'] = ApiV2010AccountAvailablePhoneNumberCountryAvailablePhoneNumberLocal
class InlineResponse2006(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This schema has no enum-constrained and no validated fields.
    allowed_values = {
    }
    validations = {
    }
    # OpenAPI "additionalProperties" is disabled for this model.
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # NOTE(review): generated pattern — declared without `self`; relies on
        # the generator's cached_property semantics.
        lazy_import()
        return {
            'available_phone_numbers': ([ApiV2010AccountAvailablePhoneNumberCountryAvailablePhoneNumberLocal],),  # noqa: E501
            'end': (int,),  # noqa: E501
            'first_page_uri': (str,),  # noqa: E501
            'next_page_uri': (str,),  # noqa: E501
            'page': (int,),  # noqa: E501
            'page_size': (int,),  # noqa: E501
            'previous_page_uri': (str,),  # noqa: E501
            'start': (int,),  # noqa: E501
            'uri': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'available_phone_numbers': 'available_phone_numbers',  # noqa: E501
        'end': 'end',  # noqa: E501
        'first_page_uri': 'first_page_uri',  # noqa: E501
        'next_page_uri': 'next_page_uri',  # noqa: E501
        'page': 'page',  # noqa: E501
        'page_size': 'page_size',  # noqa: E501
        'previous_page_uri': 'previous_page_uri',  # noqa: E501
        'start': 'start',  # noqa: E501
        'uri': 'uri',  # noqa: E501
    }
    _composed_schemas = {}
    # Runtime bookkeeping attributes every model instance carries
    # (not schema fields).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """InlineResponse2006 - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            available_phone_numbers ([ApiV2010AccountAvailablePhoneNumberCountryAvailablePhoneNumberLocal]): [optional]  # noqa: E501
            end (int): [optional]  # noqa: E501
            first_page_uri (str): [optional]  # noqa: E501
            next_page_uri (str): [optional]  # noqa: E501
            page (int): [optional]  # noqa: E501
            page_size (int): [optional]  # noqa: E501
            previous_page_uri (str): [optional]  # noqa: E501
            start (int): [optional]  # noqa: E501
            uri (str): [optional]  # noqa: E501
        """
        # Pop framework-control kwargs before treating the rest as model fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"gareth@garethpaul.com"
] | gareth@garethpaul.com |
8ea056e89d91ca1179dcefd67ff5980681224ffb | 446d9c9e98bac9bb7d6ba9d6f2639fd1ab0e68af | /pythonBook/chapter07/exercise7-11.py | 7b56a91556b954028aedbaff335d986266f8c8bb | [] | no_license | thiagofb84jp/python-exercises | 062d85f4f95332549acd42bf98de2b20afda5239 | 88ad7365a0f051021034ac6f0683b3df2de57cdb | refs/heads/main | 2023-07-19T21:15:08.689041 | 2021-08-17T10:59:09 | 2021-08-17T10:59:09 | 308,311,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | # 7.11. Jogo da forca utilizando lista de strings para desenhar boneco da forca
# Hangman: pick a word pseudo-randomly from the seed the user types,
# then play until the word is guessed or six wrong letters are entered.
palavras = ["casa", "bola", "mangueira", "uva", "quiabo", "computador",
            "cobra", "lentilha", "arroz"]
indice = int(input("Digite um número: "))
palavra = palavras[(indice * 776) % len(palavras)]
# Crude "clear screen": push previous output out of view.
for x in range(100):
    print()
digitadas = []   # every letter tried so far
acertos = []     # correct letters
erros = 0        # wrong-guess count (game over at 6)
# NOTE: linhasTxt/linhas are built per the exercise statement but the
# drawing below is done with inline prints instead.
linhasTxt = """
X==:==
X :
X
X
X
X
======
"""
linhas = []
for linha in linhasTxt.splitlines():
    linhas.append(list(linha))
while True:
    # Show the word with unguessed letters masked.
    senha = ""
    for letra in palavra:
        senha += letra if letra in acertos else "."
    print(senha)
    if senha == palavra:
        print("Você acertou!")
        break
    tentativa = input("\nDigite uma letra: ").lower().strip()
    if tentativa in digitadas:
        print("Você já tentou esta letra!")
        continue
    else:
        digitadas += tentativa
        if tentativa in palavra:
            acertos += tentativa
        else:
            erros += 1
            print("Você errou!")
    # Draw the gallows; body parts appear as the error count grows.
    print("X==:==\nX : ")
    print("X 0 " if erros >= 1 else "X")
    linha2 = ""
    if erros == 2:
        linha2 = r" | "
    elif erros == 3:
        linha2 = r" \| "
    elif erros >= 4:
        linha2 = r" \|/ "
    print(f"X{linha2}")
    linha3 = ""
    if erros == 5:
        linha3 += r" / "
    elif erros >= 6:
        linha3 += r" / \ "
    print(f"X{linha3}")
    print("X\n==========")
    if erros == 6:
        print("Enforcado!")
        print(f"A palavra secreta era: {palavra}")
        # Bug fix: without this break the loop kept asking for letters
        # after the player was already hanged.
        break
| [
"thiagofb84jp@gmail.com"
] | thiagofb84jp@gmail.com |
5c3cd8ad47b46a000dbb5da79b49de3fc8d4f40c | 0cdcee391e178092d7073734957075c72681f037 | /hackerrank/si/si-finding-frequency.py | fd7b5d044d2333049511cc5159fc814aaea62281 | [] | no_license | hrishikeshtak/Coding_Practises_Solutions | 6b483bbf19d5365e18f4ea1134aa633ff347a1c1 | 86875d7436a78420591a60b716acd2780287b4a8 | refs/heads/master | 2022-10-06T18:44:56.992451 | 2022-09-25T03:29:03 | 2022-09-25T03:29:03 | 125,744,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | #!/usr/bin/python3
# Quick Sort + Binary Search => O(NlogN + QlogN)
def partition(arr, lo, hi):
pivot = arr[hi-1]
i = lo - 1
for j in range(lo, hi-1):
if arr[j] <= pivot:
i += 1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[hi-1] = arr[hi-1], arr[i+1]
return i+1
def quick_sort(arr, lo, hi):
if lo < hi:
p = partition(arr, lo, hi)
# print(arr, p)
quick_sort(arr, lo, p)
quick_sort(arr, p+1, hi)
def BS1(arr, K):
# Return First index of element
lo = 0
hi = len(arr)-1
ans = -1
while lo <= hi:
mid = (lo + hi) // 2
if arr[mid] == K:
ans = mid
hi = mid - 1
elif arr[mid] < K:
lo = mid + 1
else:
hi = mid - 1
return ans
def BS2(arr, K):
# Return Last index of element
lo = 0
hi = len(arr)-1
ans = -1
while lo <= hi:
mid = (lo + hi) // 2
if arr[mid] == K:
ans = mid
lo = mid + 1
elif arr[mid] < K:
lo = mid + 1
else:
hi = mid - 1
return ans
def finding_frequency(arr, N, K):
x = BS1(arr, K)
y = BS2(arr, K)
# print(x, y)
if x == -1 or y == -1:
return 0
else:
return (y - x) + 1
if __name__ == "__main__":
N = int(input())
arr = list(map(int, input().split()))
# print(arr)
quick_sort(arr, 0, N)
# print(arr)
for Q in range(int(input())):
K = int(input())
print(finding_frequency(arr, N, K))
| [
"hrishikesh.tak@oneconvergence.com"
] | hrishikesh.tak@oneconvergence.com |
78ad07c06308d98770e27a8af86b6748553e3938 | 4b80b53d42cf3c303a58d6234291aaf5a8bc7a4f | /examples/webcam/webcam.py | dba875abb57bae78263cbc399a5fdd8ae8a32869 | [
"BSD-3-Clause"
] | permissive | caboteria/aiortc | 8ea9e869cbc7bc9ef677e4e2f5bf30bc94d259f3 | f85f7133435b54ce9de5f2f391c0c0ef0014e820 | refs/heads/master | 2020-09-12T19:37:07.477686 | 2019-11-04T14:50:47 | 2019-11-04T14:50:47 | 222,529,347 | 3 | 0 | BSD-3-Clause | 2019-11-18T19:38:13 | 2019-11-18T19:38:12 | null | UTF-8 | Python | false | false | 3,137 | py | import argparse
import asyncio
import json
import logging
import os
import platform
import ssl
from aiohttp import web
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.media import MediaPlayer
ROOT = os.path.dirname(__file__)
async def index(request):
    """Serve the demo page (index.html) from the module's directory."""
    # Use a context manager so the file handle is closed promptly
    # instead of relying on garbage collection.
    with open(os.path.join(ROOT, "index.html"), "r") as f:
        content = f.read()
    return web.Response(content_type="text/html", text=content)
async def javascript(request):
    """Serve the browser-side WebRTC client script (client.js)."""
    # Use a context manager so the file handle is closed promptly
    # instead of relying on garbage collection.
    with open(os.path.join(ROOT, "client.js"), "r") as f:
        content = f.read()
    return web.Response(content_type="application/javascript", text=content)
async def offer(request):
    """WebRTC signaling endpoint: accept a browser SDP offer, attach the
    media source, and return the SDP answer as JSON."""
    params = await request.json()
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
    pc = RTCPeerConnection()
    # Track the connection so on_shutdown can close it later.
    pcs.add(pc)
    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        print("ICE connection state is %s" % pc.iceConnectionState)
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)
    # open media source: file playback if --play-from was given, otherwise
    # the platform's default webcam device (avfoundation on macOS, v4l2 on Linux).
    if args.play_from:
        player = MediaPlayer(args.play_from)
    else:
        options = {"framerate": "30", "video_size": "640x480"}
        if platform.system() == "Darwin":
            player = MediaPlayer("default:none", format="avfoundation", options=options)
        else:
            player = MediaPlayer("/dev/video0", format="v4l2", options=options)
    await pc.setRemoteDescription(offer)
    # Attach the player's tracks to whatever transceivers the offer declared.
    for t in pc.getTransceivers():
        if t.kind == "audio" and player.audio:
            pc.addTrack(player.audio)
        elif t.kind == "video" and player.video:
            pc.addTrack(player.video)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    return web.Response(
        content_type="application/json",
        text=json.dumps(
            {"sdp": pc.localDescription.sdp, "type": pc.localDescription.type}
        ),
    )
pcs = set()
async def on_shutdown(app):
    """aiohttp shutdown hook: tear down every live peer connection."""
    await asyncio.gather(*(connection.close() for connection in pcs))
    pcs.clear()
if __name__ == "__main__":
    # Command line: optional TLS cert/key, optional media file, port, verbosity.
    parser = argparse.ArgumentParser(description="WebRTC webcam demo")
    parser.add_argument("--cert-file", help="SSL certificate file (for HTTPS)")
    parser.add_argument("--key-file", help="SSL key file (for HTTPS)")
    # Fix: dropped a stray trailing comma (it turned this statement into a
    # throwaway tuple) and the "sent it" typo in the help text.
    parser.add_argument("--play-from", help="Read the media from a file and send it.")
    parser.add_argument(
        "--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
    )
    parser.add_argument("--verbose", "-v", action="count")
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # Serve over HTTPS only when a certificate was provided.
    if args.cert_file:
        ssl_context = ssl.SSLContext()
        ssl_context.load_cert_chain(args.cert_file, args.key_file)
    else:
        ssl_context = None
    app = web.Application()
    app.on_shutdown.append(on_shutdown)
    app.router.add_get("/", index)
    app.router.add_get("/client.js", javascript)
    app.router.add_post("/offer", offer)
    web.run_app(app, port=args.port, ssl_context=ssl_context)
| [
"jeremy.laine@m4x.org"
] | jeremy.laine@m4x.org |
10eb48208a23e05214dce95000fdaa72926fe379 | 858ccfa59703f5c6b822c0a88d72ac84610f6353 | /Day 6/exercise.py | a89cb1cfb0151fde47f367a63432eca3810dd577 | [] | no_license | purusottam234/Python-Class | 7db8d6084bc2271c00bd0bb88e70768fb86fcc3e | df09421e3a1a110ef592b0e0c971ca824854a4d8 | refs/heads/main | 2023-05-09T07:35:51.931483 | 2021-06-05T15:45:56 | 2021-06-05T15:45:56 | 364,840,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # 1. program to determine the first power of 7 greater than 1000
product = 7
while product <= 1000:
product = product * 7
print(product)
# 2.Use the range function and a for statement to calculate the total of the integer from 0 to 1000000
total = 0
for number in range(1000001):
total += number
print(total)
# 3.Display f-string in which you insert the value of the variables number1(7) and number2(5) and their product. display string should be:
# 7 times 5 is 35
number1 = 7
number2 = 5
mul = number1*number2
print(f'{number1} times {number2} is {mul}')
| [
"purusottamadhikari234@gmail.com"
] | purusottamadhikari234@gmail.com |
7c7968948bbb512805492e446d8cc02e2418e385 | d257ddf7e6959d0989d76080a8a048e82393657f | /002_TemplateMatching/001_template_match_provided.py | 2ee8a6ec9e5ed11e52303834c1d83472c132e0c5 | [
"MIT"
] | permissive | remichartier/027_selfDrivingCarND_ObjectDetectionExercises | d210f37b7baf306dd034c09f62e125b263f8270d | ccd853c975d35df5f31e1a445a1a8757b8bd13f5 | refs/heads/main | 2023-04-17T08:09:55.465143 | 2021-05-03T07:11:16 | 2021-05-03T07:11:16 | 362,013,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Scene image to search in (alternate test image left commented out).
image = mpimg.imread('bbox-example-image.jpg')
#image = mpimg.imread('temp-matching-example-2.jpg')
# Template cutouts to look for inside the scene image.
templist = ['cutout1.jpg', 'cutout2.jpg', 'cutout3.jpg',
            'cutout4.jpg', 'cutout5.jpg', 'cutout6.jpg']
# Here is your draw_boxes function from the previous exercise
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with each bounding box drawn on it.

    `bboxes` holds ((x1, y1), (x2, y2)) corner pairs; the input image
    itself is left untouched.
    """
    canvas = np.copy(img)
    for box in bboxes:
        cv2.rectangle(canvas, box[0], box[1], color, thick)
    return canvas
# Define a function that takes an image and a list of templates as inputs
# then searches the image and returns the a list of bounding boxes
# for matched templates
def find_matches(img, template_list):
    """Search `img` for each template image; return the matched boxes.

    Implements the exercise skeleton: for every template file, run
    cv2.matchTemplate over the image and take the single best match.

    Parameters:
        img: scene image (as read by matplotlib.image.imread).
        template_list: filenames of the template (cutout) images.
    Returns:
        List of ((x1, y1), (x2, y2)) tuples, one per template, giving the
        top-left and bottom-right corners of the best match.
    """
    # Define an empty list to take bbox coords
    bbox_list = []
    # Normalized correlation: the best match is the *maximum* of the result
    # map. (For TM_SQDIFF / TM_SQDIFF_NORMED use min_loc instead.)
    method = cv2.TM_CCOEFF_NORMED
    # Iterate through template list, reading templates one by one
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Search the image for this template
        result = cv2.matchTemplate(img, tmp, method)
        # Extract the location of the best match
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
        # Determine bounding box corners for the match
        w, h = (tmp.shape[1], tmp.shape[0])
        top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list
# Locate each cutout in the scene, draw the resulting boxes, and display.
bboxes = find_matches(image, templist)
result = draw_boxes(image, bboxes)
plt.imshow(result)
"remipr.chartier@gmail.com"
] | remipr.chartier@gmail.com |
2e3b493d91df12e0a3baa4ceaf9e41d9bfec86ea | e8f99a162207cba82d4e0f969d7bcdb2b9d8b522 | /dev_demo/tmp_file_demo2.py | 39d04c5233f6ba6a8a18ad7a744ff59847bfbcf3 | [] | no_license | TesterCC/Python3Scripts | edb5446278ebf13edb64336001081941ca27d67d | 58be67e1ffc74ef50289a885aa4ad05f58e2c383 | refs/heads/master | 2023-08-30T21:16:38.328045 | 2023-08-17T11:23:08 | 2023-08-17T11:23:08 | 93,401,996 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | # -*- coding: utf-8 -*-
# @Time : 2022/8/28
# @Author : SecCodeCat
# 创建临时文件
import tempfile
'''
http://t.zoukankan.com/liuhui0308-p-12464003.html
https://blog.csdn.net/weixin_37926734/article/details/123563067
https://docs.python.org/zh-tw/dev/library/tempfile.html
'''
# fp = tempfile.TemporaryFile()
# print(fp.name)
# fp.write('两情若是久长时,'.encode('utf-8'))
# fp.write('又岂在朝朝暮暮。'.encode('utf-8'))
# # 将文件指针移到开始处,准备读取文件
# fp.seek(0)
# print(fp.read().decode('utf-8')) # 输出刚才写入的内容
# # 关闭文件,该文件将会被自动删除
# fp.close()
# 通过with语句创建临时文件,with会自动关闭临时文件
# with tempfile.TemporaryFile() as fp:
with tempfile.NamedTemporaryFile() as fp:
# 写入内容
fp.write(b'I Love Security, Python and Go!')
# 将文件指针移到开始处,准备读取文件
fp.seek(0)
# 读取文件内容
print(fp.read()) # b'I Love Python!'
print("temp file name: ", fp.name)
# 通过with语句创建临时目录
with tempfile.TemporaryDirectory() as tmpdirname:
print('创建临时目录', tmpdirname)
| [
"testerlyx@foxmail.com"
] | testerlyx@foxmail.com |
80d9c3ff54ab696e355e64cbd68bbafd5b1c6aeb | 08353419541e9f3be586a7e575585a55c98b976b | /src/pythonfinder/__init__.py | 25fd2ed74524c8053dee4975d7dac772f70b6875 | [
"MIT"
] | permissive | immerrr/pythonfinder | df9a923644c2fb5f91115aa54d2e12b5d50bff99 | f072cf19cfebff73229a19e24bfffd378716d742 | refs/heads/master | 2020-04-01T00:56:27.668668 | 2018-10-11T09:45:12 | 2018-10-11T09:45:12 | 152,719,560 | 0 | 0 | MIT | 2018-10-12T08:35:20 | 2018-10-12T08:35:20 | null | UTF-8 | Python | false | false | 284 | py | from __future__ import print_function, absolute_import
__version__ = '1.1.1.dev0'
__all__ = ["Finder", "WindowsFinder", "SystemPath", "InvalidPythonVersion"]
from .pythonfinder import Finder
from .models import SystemPath, WindowsFinder
from .exceptions import InvalidPythonVersion
| [
"dan@danryan.co"
] | dan@danryan.co |
80c4495cf6e71d58bc92bfe8640a7d25193c3d2a | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_4_neat/16_0_4_coolbouy_codejam4.py | 3d006c88b9ccd44321ca82d0760a06418d9ad546 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 302 | py | # your code goes here
import sys
T=int(input())
num=T
for k in range(0,num):
K,C,S = map(int,sys.stdin.readline().split())
print("Case #",end="")
print(k+1,end=": ")
for i in range(0,K,1):
if i!=K-1:
temp=i*pow(K,C-1)+1
print(temp,end=" ")
else:
temp=i*pow(K,C-1)+1
print(temp)
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
716723e46591c1620beeabe94cce5492e920b866 | 651a296c8f45b5799781fd78a6b5329effe702a0 | /legendre_product_polynomial/polynomial_sort.py | 1ec578df1da3c92db966c42cc5e401a2b22fa2e5 | [] | no_license | pdhhiep/Computation_using_Python | 095d14370fe1a01a192d7e44fcc81a52655f652b | 407ed29fddc267950e9860b8bbd1e038f0387c97 | refs/heads/master | 2021-05-29T12:35:12.630232 | 2015-06-27T01:05:17 | 2015-06-27T01:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | #!/usr/bin/env python
def polynomial_sort ( o, c, e ):
#*****************************************************************************80
#
## POLYNOMIAL_SORT sorts the information in a polynomial.
#
# Discussion:
#
# The coefficients C and exponents E are rearranged so that
# the elements of E are in ascending order.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 27 October 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# Input, integer O, the "order" of the polynomial.
#
# Input, real C[O], the coefficients of the scaled polynomial.
#
# Input, integer E[O], the indices of the exponents of
# the scaled polynomial.
#
# Output, real C[O], the coefficients of the sorted polynomial.
#
# Output, integer E[O], the indices of the exponents of
# the sorted polynomial.
#
from i4vec_permute import i4vec_permute
from i4vec_sort_heap_index_a import i4vec_sort_heap_index_a
from r8vec_permute import r8vec_permute
indx = i4vec_sort_heap_index_a ( o, e )
e = i4vec_permute ( o, indx, e )
c = r8vec_permute ( o, indx, c )
return c, e
def polynomial_sort_test ( ):
#*****************************************************************************80
#
## POLYNOMIAL_SORT_TEST tests POLYNOMIAL_SORT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 27 October 2014
#
# Author:
#
# John Burkardt
#
from polynomial_print import polynomial_print
import numpy as np
m = 3
o = 6
c = np.array ( [ 0.0, 9.0, -5.0, - 13.0, 7.0, 11.0 ], dtype = np.float64 )
e = np.array ( [ 12, 4, 2, 33, 1, 5 ], dtype = np.int32 )
print ''
print 'POLYNOMIAL_SORT_TEST'
print ' POLYNOMIAL_SORT sorts a polynomial by exponent index.'
print ''
title = ' Unsorted polynomial:'
polynomial_print ( m, o, c, e, title )
c, e = polynomial_sort ( o, c, e )
print ''
title = ' Sorted polynomial:'
polynomial_print ( m, o, c, e, title )
print ''
print 'POLYNOMIAL_SORT_TEST:'
print ' Normal end of execution.'
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
polynomial_sort_test ( )
timestamp ( )
| [
"siplukabir@gmail.com"
] | siplukabir@gmail.com |
acda951565a8ca9395ca49144cb5fab259a066b7 | f68afe06e4bbf3d523584852063e767e53441b2b | /Toontown/toontown/hood/BossbotHQAI.py | 3b10ecb5ff983612b9311f78f57786a5d65e665e | [] | no_license | DankMickey/Toontown-Offline-Squirting-Flower-Modded- | eb18908e7a35a5f7fc95871814207858b94e2600 | 384754c6d97950468bb62ddd8961c564097673a9 | refs/heads/master | 2021-01-19T17:53:36.591832 | 2017-01-15T02:00:04 | 2017-01-15T02:00:04 | 34,639,744 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,548 | py | from toontown.building import DistributedBBElevatorAI
from toontown.building import FADoorCodes
from toontown.building.DistributedBoardingPartyAI import DistributedBoardingPartyAI
from toontown.coghq import DistributedCogKartAI
from toontown.hood import CogHQAI
from toontown.suit import DistributedBossbotBossAI
from toontown.suit import DistributedSuitPlannerAI
from toontown.toonbase import ToontownGlobals
class BossbotHQAI(CogHQAI.CogHQAI):
def __init__(self, air):
CogHQAI.CogHQAI.__init__(
self, air, ToontownGlobals.BossbotHQ, ToontownGlobals.BossbotLobby,
FADoorCodes.BB_DISGUISE_INCOMPLETE,
DistributedBBElevatorAI.DistributedBBElevatorAI,
DistributedBossbotBossAI.DistributedBossbotBossAI)
self.cogKarts = []
self.courseBoardingParty = None
self.suitPlanners = []
self.startup()
def startup(self):
CogHQAI.CogHQAI.startup(self)
self.createCogKarts()
if simbase.config.GetBool('want-boarding-groups', True):
self.createCourseBoardingParty()
if simbase.config.GetBool('want-suit-planners', True):
self.createSuitPlanners()
def createCogKarts(self):
posList = ((-26.5658, 237.459, 0), (132.197, 227.845, 0), (-28.725, -235.706, 0))
hprList = ((-159, 0, 0), (172, 0, 0), (-21, 0, 0))
mins = ToontownGlobals.FactoryLaffMinimums[3]
for cogCourse in xrange(len(posList)):
pos = posList[cogCourse]
hpr = hprList[cogCourse]
cogKart = DistributedCogKartAI.DistributedCogKartAI(
self.air, cogCourse,
pos[0], pos[1], pos[2], hpr[0], hpr[1], hpr[2],
self.air.countryClubMgr, minLaff=mins[cogCourse])
cogKart.generateWithRequired(self.zoneId)
self.cogKarts.append(cogKart)
def createCourseBoardingParty(self):
cogKartIdList = []
for cogKart in self.cogKarts:
cogKartIdList.append(cogKart.doId)
self.courseBoardingParty = DistributedBoardingPartyAI(self.air, cogKartIdList, 4)
self.courseBoardingParty.generateWithRequired(self.zoneId)
def createSuitPlanners(self):
suitPlanner = DistributedSuitPlannerAI.DistributedSuitPlannerAI(self.air, self.zoneId)
suitPlanner.generateWithRequired(self.zoneId)
suitPlanner.d_setZoneId(self.zoneId)
suitPlanner.initTasks()
self.suitPlanners.append(suitPlanner)
self.air.suitPlanners[self.zoneId] = suitPlanner
| [
"jareddarty96@gmail.com"
] | jareddarty96@gmail.com |
26a8369da37cc8e23ab8a102609a8d5ead3ac030 | bcf0e03ebd7e55588dcf48ab5d990534f8d9ab0c | /Hackerrank/Archive 2019/ginortS.py | ad58d6a3130006607742eb1cc5781973998896a6 | [] | no_license | nsky80/competitive_programming | 731321aaf42d9ae546f1d13bbb05215a1fbcfe45 | 9b0c0ffccf092d4d4bbf50cac1746f44dd977d57 | refs/heads/master | 2022-02-06T11:58:44.313635 | 2022-01-30T09:20:15 | 2022-01-30T09:20:15 | 199,516,791 | 1 | 2 | null | 2022-01-30T09:20:16 | 2019-07-29T19:43:17 | Python | UTF-8 | Python | false | false | 558 | py | # Sample Input
#
# Sorting1234
# Sample Output
#
# ginortS1324
import re
if __name__ == "__main__":
b = input()
n = list(map(int, re.findall(r'[0-9]', b)))
even_lst = []
odd_lst = []
for j in n:
if j % 2 == 0:
even_lst.append(j)
else:
odd_lst.append(j)
s = re.findall(r'[a-z]', b)
u = re.findall(r'[A-Z]', b)
s.sort()
u.sort()
even_lst.sort()
odd_lst.sort()
res = "".join(["".join(s), "".join(u), "".join(list(map(str, sum([odd_lst, even_lst], []))))])
print(res)
| [
"satishkumary80@gmail.com"
] | satishkumary80@gmail.com |
b871e309663b4f943adca7c9fa274a8c90a9a1d6 | c4a0669126f2fbf757ac3b33a8279ef32305bbd7 | /Data Project/Notes/Packages/Animals/Mammals.py | 53b40dc655716ea125ebbfceeb31e9faf7a428dd | [] | no_license | ezeutno/PycharmProject | 822b5a7da05729c5241a03b7413548a34b12e4a5 | bdb87599885287d2d7cd5cd703b62197563722b8 | refs/heads/master | 2021-07-18T20:55:08.605486 | 2017-10-24T03:14:10 | 2017-10-24T03:14:10 | 105,782,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | def printMammal():
print('I am a Mammal') | [
"ivan.suratno@gmail.com"
] | ivan.suratno@gmail.com |
37798f2901a1126fcf537c806d09590b0e0ad4ec | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/2944123/snippet.py | 66b8345386111bdad2c832d22d7126998d38f57a | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 236 | py | # http://www.linuxhomenetworking.com/forums/showthread.php/1095-Linux-console-Colors-And-Other-Trick-s
def printWarning(input):
print("\033[31m%s\033[0m" % input)
def funkyprint(input):
print("\033[36m%s\033[0m" % input)
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
ae13497b779a93386ca0521a3559b59ac08c34dd | 6580ba5d135c4f33f1a0996953ba2a65f7458a14 | /applications/ji164/models/fdproduct0detail.py | e5f4defa77c1af088148b38abd895677142b86db | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ali96343/facew2p | 02b038d3853691264a49de3409de21c8a33544b8 | a3881b149045e9caac344402c8fc4e62edadb42f | refs/heads/master | 2021-06-10T17:52:22.200508 | 2021-05-10T23:11:30 | 2021-05-10T23:11:30 | 185,795,614 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,651 | py | #
# table for controller: product_detail
#
from gluon.contrib.populate import populate
db.define_table('dproduct0detail',
Field('f0', label='key', writable = True , length= 1000),
Field('f1', 'text', label='data string', length= 1000),
Field('f2', 'text', label='save data string', length= 1000, default='' ),
)
#
if not db(db.dproduct0detail.id ).count():
db.dproduct0detail.insert( f0= 'sp887', f1= '(887)outdated')
db.dproduct0detail.insert( f0= 'pc888', f1= '(888)to improve your experience.')
db.dproduct0detail.insert( f0= 'aa889', f1= '(889)upgrade your browser')
db.dproduct0detail.insert( f0= 'sx894', f1= '(894)Ecommerce')
db.dproduct0detail.insert( f0= 'sx896', f1= '(896)Dashboard v.1')
db.dproduct0detail.insert( f0= 'sx898', f1= '(898)Dashboard v.2')
db.dproduct0detail.insert( f0= 'sx900', f1= '(900)Dashboard v.3')
db.dproduct0detail.insert( f0= 'sx902', f1= '(902)Product List')
db.dproduct0detail.insert( f0= 'sx904', f1= '(904)Product Edit')
db.dproduct0detail.insert( f0= 'sx906', f1= '(906)Product Detail')
db.dproduct0detail.insert( f0= 'sx908', f1= '(908)Product Cart')
db.dproduct0detail.insert( f0= 'sx910', f1= '(910)Product Payment')
db.dproduct0detail.insert( f0= 'sx912', f1= '(912)Analytics')
db.dproduct0detail.insert( f0= 'sx914', f1= '(914)Widgets')
db.dproduct0detail.insert( f0= 'sx916', f1= '(916)Mailbox')
db.dproduct0detail.insert( f0= 'sx918', f1= '(918)Inbox')
db.dproduct0detail.insert( f0= 'sx920', f1= '(920)View Mail')
db.dproduct0detail.insert( f0= 'sx922', f1= '(922)Compose Mail')
db.dproduct0detail.insert( f0= 'sx924', f1= '(924)Interface')
db.dproduct0detail.insert( f0= 'sx926', f1= '(926)Google Map')
db.dproduct0detail.insert( f0= 'sx928', f1= '(928)Data Maps')
db.dproduct0detail.insert( f0= 'sx930', f1= '(930)Pdf Viewer')
db.dproduct0detail.insert( f0= 'sx932', f1= '(932)X-Editable')
db.dproduct0detail.insert( f0= 'sx934', f1= '(934)Code Editor')
db.dproduct0detail.insert( f0= 'sx936', f1= '(936)Tree View')
db.dproduct0detail.insert( f0= 'sx938', f1= '(938)Preloader')
db.dproduct0detail.insert( f0= 'sx940', f1= '(940)Images Cropper')
db.dproduct0detail.insert( f0= 'sx942', f1= '(942)Miscellaneous')
db.dproduct0detail.insert( f0= 'sx944', f1= '(944)File Manager')
db.dproduct0detail.insert( f0= 'sx946', f1= '(946)Blog')
db.dproduct0detail.insert( f0= 'sx948', f1= '(948)Blog Details')
db.dproduct0detail.insert( f0= 'sx950', f1= '(950)404 Page')
db.dproduct0detail.insert( f0= 'sx952', f1= '(952)500 Page')
db.dproduct0detail.insert( f0= 'sx954', f1= '(954)Charts')
db.dproduct0detail.insert( f0= 'sx956', f1= '(956)Bar Charts')
db.dproduct0detail.insert( f0= 'sx958', f1= '(958)Line Charts')
db.dproduct0detail.insert( f0= 'sx960', f1= '(960)Area Charts')
db.dproduct0detail.insert( f0= 'sx962', f1= '(962)Rounded Charts')
db.dproduct0detail.insert( f0= 'sx964', f1= '(964)C3 Charts')
db.dproduct0detail.insert( f0= 'sx966', f1= '(966)Sparkline Charts')
db.dproduct0detail.insert( f0= 'sx968', f1= '(968)Peity Charts')
db.dproduct0detail.insert( f0= 'sx970', f1= '(970)Data Tables')
db.dproduct0detail.insert( f0= 'sx972', f1= '(972)Static Table')
db.dproduct0detail.insert( f0= 'sx974', f1= '(974)Data Table')
db.dproduct0detail.insert( f0= 'sx976', f1= '(976)Forms Elements')
db.dproduct0detail.insert( f0= 'sx978', f1= '(978)Bc Form Elements')
db.dproduct0detail.insert( f0= 'sx980', f1= '(980)Ad Form Elements')
db.dproduct0detail.insert( f0= 'sx982', f1= '(982)Password Meter')
db.dproduct0detail.insert( f0= 'sx984', f1= '(984)Multi Upload')
db.dproduct0detail.insert( f0= 'sx986', f1= '(986)Text Editor')
db.dproduct0detail.insert( f0= 'sx988', f1= '(988)Dual List Box')
db.dproduct0detail.insert( f0= 'sx990', f1= '(990)App views')
db.dproduct0detail.insert( f0= 'sx992', f1= '(992)Notifications')
db.dproduct0detail.insert( f0= 'sx994', f1= '(994)Alerts')
db.dproduct0detail.insert( f0= 'sx996', f1= '(996)Modals')
db.dproduct0detail.insert( f0= 'sx998', f1= '(998)Buttons')
db.dproduct0detail.insert( f0= 'sx1000', f1= '(1000)Tabs')
db.dproduct0detail.insert( f0= 'sx1002', f1= '(1002)Accordion')
db.dproduct0detail.insert( f0= 'sx1003', f1= '(1003)Pages')
db.dproduct0detail.insert( f0= 'sx1005', f1= '(1005)Login')
db.dproduct0detail.insert( f0= 'sx1007', f1= '(1007)Register')
db.dproduct0detail.insert( f0= 'sx1009', f1= '(1009)Lock')
db.dproduct0detail.insert( f0= 'sx1011', f1= '(1011)Password Recovery')
db.dproduct0detail.insert( f0= 'sx1012', f1= '(1012)Landing Page')
db.dproduct0detail.insert( f0= 'aa1015', f1= '(1015)Home')
db.dproduct0detail.insert( f0= 'aa1016', f1= '(1016)About')
db.dproduct0detail.insert( f0= 'aa1017', f1= '(1017)Services')
db.dproduct0detail.insert( f0= 'aa1018', f1= '(1018)Support')
db.dproduct0detail.insert( f0= 'hh1019', f1= '(1019)Message')
db.dproduct0detail.insert( f0= 'sx1021', f1= '(1021)16 Sept')
db.dproduct0detail.insert( f0= 'hh1022', f1= '(1022)Advanda Cro')
db.dproduct0detail.insert( f0= 'pa1023', f1= '(1023)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1025', f1= '(1025)16 Sept')
db.dproduct0detail.insert( f0= 'hh1026', f1= '(1026)Sulaiman din')
db.dproduct0detail.insert( f0= 'pa1027', f1= '(1027)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1029', f1= '(1029)16 Sept')
db.dproduct0detail.insert( f0= 'hh1030', f1= '(1030)Victor Jara')
db.dproduct0detail.insert( f0= 'pa1031', f1= '(1031)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1033', f1= '(1033)16 Sept')
db.dproduct0detail.insert( f0= 'hh1034', f1= '(1034)Victor Jara')
db.dproduct0detail.insert( f0= 'pa1035', f1= '(1035)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'aa1036', f1= '(1036)View All Messages')
db.dproduct0detail.insert( f0= 'hh1037', f1= '(1037)Notifications')
db.dproduct0detail.insert( f0= 'sx1038', f1= '(1038)16 Sept')
db.dproduct0detail.insert( f0= 'hh1039', f1= '(1039)Advanda Cro')
db.dproduct0detail.insert( f0= 'pa1040', f1= '(1040)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1041', f1= '(1041)16 Sept')
db.dproduct0detail.insert( f0= 'hh1042', f1= '(1042)Sulaiman din')
db.dproduct0detail.insert( f0= 'pa1043', f1= '(1043)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1044', f1= '(1044)16 Sept')
db.dproduct0detail.insert( f0= 'hh1045', f1= '(1045)Victor Jara')
db.dproduct0detail.insert( f0= 'pa1046', f1= '(1046)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'sx1047', f1= '(1047)16 Sept')
db.dproduct0detail.insert( f0= 'hh1048', f1= '(1048)Victor Jara')
db.dproduct0detail.insert( f0= 'pa1049', f1= '(1049)Please done this project as soon possible.')
db.dproduct0detail.insert( f0= 'aa1050', f1= '(1050)View All Notification')
db.dproduct0detail.insert( f0= 'sx1051', f1= '(1051)Advanda Cro')
db.dproduct0detail.insert( f0= 'aa1055', f1= '(1055)News')
db.dproduct0detail.insert( f0= 'aa1056', f1= '(1056)Activity')
db.dproduct0detail.insert( f0= 'aa1057', f1= '(1057)Settings')
db.dproduct0detail.insert( f0= 'pa1058', f1= '(1058)You have 10 New News.')
db.dproduct0detail.insert( f0= 'pa1060', f1= '(1060)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1061', f1= '(1061)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1063', f1= '(1063)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1064', f1= '(1064)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1066', f1= '(1066)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1067', f1= '(1067)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1069', f1= '(1069)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1070', f1= '(1070)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1072', f1= '(1072)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1073', f1= '(1073)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1075', f1= '(1075)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1076', f1= '(1076)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1078', f1= '(1078)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1079', f1= '(1079)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1081', f1= '(1081)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1082', f1= '(1082)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1084', f1= '(1084)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1085', f1= '(1085)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1087', f1= '(1087)The point of using Lorem Ipsum is that it has a more-or-less normal.')
db.dproduct0detail.insert( f0= 'sp1088', f1= '(1088)Yesterday 2:45 pm')
db.dproduct0detail.insert( f0= 'pa1089', f1= '(1089)You have 20 Recent Activity.')
db.dproduct0detail.insert( f0= 'hh1090', f1= '(1090)New User Registered')
db.dproduct0detail.insert( f0= 'pa1091', f1= '(1091)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1092', f1= '(1092)1 hours ago')
db.dproduct0detail.insert( f0= 'hh1093', f1= '(1093)New Order Received')
db.dproduct0detail.insert( f0= 'pa1094', f1= '(1094)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1095', f1= '(1095)2 hours ago')
db.dproduct0detail.insert( f0= 'hh1096', f1= '(1096)New Order Received')
db.dproduct0detail.insert( f0= 'pa1097', f1= '(1097)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1098', f1= '(1098)3 hours ago')
db.dproduct0detail.insert( f0= 'hh1099', f1= '(1099)New Order Received')
db.dproduct0detail.insert( f0= 'pa1100', f1= '(1100)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1101', f1= '(1101)4 hours ago')
db.dproduct0detail.insert( f0= 'hh1102', f1= '(1102)New User Registered')
db.dproduct0detail.insert( f0= 'pa1103', f1= '(1103)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1104', f1= '(1104)5 hours ago')
db.dproduct0detail.insert( f0= 'hh1105', f1= '(1105)New Order')
db.dproduct0detail.insert( f0= 'pa1106', f1= '(1106)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1107', f1= '(1107)6 hours ago')
db.dproduct0detail.insert( f0= 'hh1108', f1= '(1108)New User')
db.dproduct0detail.insert( f0= 'pa1109', f1= '(1109)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1110', f1= '(1110)7 hours ago')
db.dproduct0detail.insert( f0= 'hh1111', f1= '(1111)New Order')
db.dproduct0detail.insert( f0= 'pa1112', f1= '(1112)The point of using Lorem Ipsum is that it has a more or less normal.')
db.dproduct0detail.insert( f0= 'sx1113', f1= '(1113)9 hours ago')
db.dproduct0detail.insert( f0= 'pa1114', f1= '(1114)You have 20 Settings. 5 not completed.')
db.dproduct0detail.insert( f0= 'hh1115', f1= '(1115)Show notifications')
db.dproduct0detail.insert( f0= 'hh1116', f1= '(1116)Disable Chat')
db.dproduct0detail.insert( f0= 'hh1117', f1= '(1117)Enable history')
db.dproduct0detail.insert( f0= 'hh1118', f1= '(1118)Show charts')
db.dproduct0detail.insert( f0= 'hh1119', f1= '(1119)Update everyday')
db.dproduct0detail.insert( f0= 'hh1120', f1= '(1120)Global search')
db.dproduct0detail.insert( f0= 'hh1121', f1= '(1121)Offline users')
db.dproduct0detail.insert( f0= 'aa1123', f1= '(1123)Dashboard v.1')
db.dproduct0detail.insert( f0= 'aa1125', f1= '(1125)Dashboard v.2')
db.dproduct0detail.insert( f0= 'aa1127', f1= '(1127)Dashboard v.3')
db.dproduct0detail.insert( f0= 'aa1129', f1= '(1129)Product List')
db.dproduct0detail.insert( f0= 'aa1131', f1= '(1131)Product Edit')
db.dproduct0detail.insert( f0= 'aa1133', f1= '(1133)Product Detail')
db.dproduct0detail.insert( f0= 'aa1135', f1= '(1135)Product Cart')
db.dproduct0detail.insert( f0= 'aa1137', f1= '(1137)Product Payment')
db.dproduct0detail.insert( f0= 'aa1139', f1= '(1139)Analytics')
db.dproduct0detail.insert( f0= 'aa1141', f1= '(1141)Widgets')
db.dproduct0detail.insert( f0= 'aa1143', f1= '(1143)Inbox')
db.dproduct0detail.insert( f0= 'aa1145', f1= '(1145)View Mail')
db.dproduct0detail.insert( f0= 'aa1147', f1= '(1147)Compose Mail')
db.dproduct0detail.insert( f0= 'aa1149', f1= '(1149)File Manager')
db.dproduct0detail.insert( f0= 'aa1151', f1= '(1151)Contacts Client')
db.dproduct0detail.insert( f0= 'aa1153', f1= '(1153)Project')
db.dproduct0detail.insert( f0= 'aa1155', f1= '(1155)Project Details')
db.dproduct0detail.insert( f0= 'aa1157', f1= '(1157)Blog')
db.dproduct0detail.insert( f0= 'aa1159', f1= '(1159)Blog Details')
db.dproduct0detail.insert( f0= 'aa1161', f1= '(1161)404 Page')
db.dproduct0detail.insert( f0= 'aa1163', f1= '(1163)500 Page')
db.dproduct0detail.insert( f0= 'aa1165', f1= '(1165)Google Map')
db.dproduct0detail.insert( f0= 'aa1167', f1= '(1167)Data Maps')
db.dproduct0detail.insert( f0= 'aa1169', f1= '(1169)Pdf Viewer')
db.dproduct0detail.insert( f0= 'aa1171', f1= '(1171)X-Editable')
db.dproduct0detail.insert( f0= 'aa1173', f1= '(1173)Code Editor')
db.dproduct0detail.insert( f0= 'aa1175', f1= '(1175)Tree View')
db.dproduct0detail.insert( f0= 'aa1177', f1= '(1177)Preloader')
db.dproduct0detail.insert( f0= 'aa1179', f1= '(1179)Images Cropper')
db.dproduct0detail.insert( f0= 'aa1181', f1= '(1181)Bar Charts')
db.dproduct0detail.insert( f0= 'aa1183', f1= '(1183)Line Charts')
db.dproduct0detail.insert( f0= 'aa1185', f1= '(1185)Area Charts')
db.dproduct0detail.insert( f0= 'aa1187', f1= '(1187)Rounded Charts')
db.dproduct0detail.insert( f0= 'aa1189', f1= '(1189)C3 Charts')
db.dproduct0detail.insert( f0= 'aa1191', f1= '(1191)Sparkline Charts')
db.dproduct0detail.insert( f0= 'aa1193', f1= '(1193)Peity Charts')
db.dproduct0detail.insert( f0= 'aa1195', f1= '(1195)Static Table')
db.dproduct0detail.insert( f0= 'aa1197', f1= '(1197)Data Table')
db.dproduct0detail.insert( f0= 'aa1199', f1= '(1199)Basic Form Elements')
db.dproduct0detail.insert( f0= 'aa1201', f1= '(1201)Advanced Form Elements')
db.dproduct0detail.insert( f0= 'aa1203', f1= '(1203)Password Meter')
db.dproduct0detail.insert( f0= 'aa1205', f1= '(1205)Multi Upload')
db.dproduct0detail.insert( f0= 'aa1207', f1= '(1207)Text Editor')
db.dproduct0detail.insert( f0= 'aa1209', f1= '(1209)Dual List Box')
db.dproduct0detail.insert( f0= 'aa1211', f1= '(1211)Basic Form Elements')
db.dproduct0detail.insert( f0= 'aa1213', f1= '(1213)Advanced Form Elements')
db.dproduct0detail.insert( f0= 'aa1215', f1= '(1215)Password Meter')
db.dproduct0detail.insert( f0= 'aa1217', f1= '(1217)Multi Upload')
db.dproduct0detail.insert( f0= 'aa1219', f1= '(1219)Text Editor')
db.dproduct0detail.insert( f0= 'aa1221', f1= '(1221)Dual List Box')
db.dproduct0detail.insert( f0= 'aa1223', f1= '(1223)Login')
db.dproduct0detail.insert( f0= 'aa1225', f1= '(1225)Register')
db.dproduct0detail.insert( f0= 'aa1227', f1= '(1227)Lock')
db.dproduct0detail.insert( f0= 'aa1229', f1= '(1229)Password Recovery')
db.dproduct0detail.insert( f0= 'pb1230', f1= '(1230)Search...')
db.dproduct0detail.insert( f0= 'aa1231', f1= '(1231)Home')
db.dproduct0detail.insert( f0= 'sx1232', f1= '(1232)Product Details')
db.dproduct0detail.insert( f0= 'hh1242', f1= '(1242)Jewelery ITEM TITLE')
db.dproduct0detail.insert( f0= 'sx1243', f1= '(1243)$150.00')
db.dproduct0detail.insert( f0= 'hh1244', f1= '(1244)Size')
db.dproduct0detail.insert( f0= 'hh1245', f1= '(1245)Color')
db.dproduct0detail.insert( f0= 'hh1246', f1= '(1246)Quality')
db.dproduct0detail.insert( f0= 'aa1247', f1= '(1247)ADD TO Cart')
db.dproduct0detail.insert( f0= 'hh1248', f1= '(1248)share this on')
db.dproduct0detail.insert( f0= 'hh1249', f1= '(1249)OVERVIEW')
db.dproduct0detail.insert( f0= 'aa1250', f1= '(1250)description')
db.dproduct0detail.insert( f0= 'aa1251', f1= '(1251)INFORMATION')
db.dproduct0detail.insert( f0= 'pa1252', f1= '(1252)No reviews yet.')
db.dproduct0detail.insert( f0= 'pa1253', f1= '(1253)Your Rating')
db.dproduct0detail.insert( f0= 'pb1254', f1= '(1254)User Name')
db.dproduct0detail.insert( f0= 'pb1255', f1= '(1255)Last Name')
db.dproduct0detail.insert( f0= 'pb1256', f1= '(1256)Email')
db.dproduct0detail.insert( f0= 'pc1257', f1= '(1257)All rights reserved.')
db.dproduct0detail.insert( f0= 'pf1258', f1= '(1258)Copyright © 2018')
db.commit()
#
| [
"ab96343@gmail.com"
] | ab96343@gmail.com |
1f6b0db4d2ed04dd37abbe540807efd13d8e76bb | cb324391aa08aea41faeff9ae58f9ad81ef5ac30 | /ssseg/cfgs/ocrnet/cfgs_ade20k_hrnetv2w18s.py | 8d42127b4a14a73294e44d227751ca32c3c505a7 | [
"MIT"
] | permissive | suyanzhou626/sssegmentation | 841205baf6b1edb0f94c91fe347550886c3aea49 | 55084161c216e9c8c0a7bb1d154200ab81eb2522 | refs/heads/main | 2023-08-15T06:35:41.836575 | 2021-09-14T01:33:50 | 2021-09-14T01:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | '''define the config file for ade20k and hrnetv2w-18-small'''
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
{
'type': 'ade20k',
'rootdir': 'data/ADE20k',
}
)
DATASET_CFG['test'].update(
{
'type': 'ade20k',
'rootdir': 'data/ADE20k',
}
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
{
'num_classes': 150,
'backbone': {
'type': 'hrnetv2_w18_small',
'series': 'hrnet',
'pretrained': True,
'selected_indices': (0, 0),
},
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'ocrnet_hrnetv2w18s_ade20k_train',
'logfilepath': 'ocrnet_hrnetv2w18s_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'ocrnet_hrnetv2w18s_ade20k_test',
'logfilepath': 'ocrnet_hrnetv2w18s_ade20k_test/test.log',
'resultsavepath': 'ocrnet_hrnetv2w18s_ade20k_test/ocrnet_hrnetv2w18s_ade20k_results.pkl'
}
) | [
"1159254961@qq.com"
] | 1159254961@qq.com |
86d2ce44f553a827461a97282430593362029aea | 0049d7959ff872e2ddf6ea3ce83b6c26512425a6 | /django_demo_applications/djangoprojectsot/blog_project/blog/migrations/0002_comment.py | c5696599e3d51a93da698aeed82aa74f6c965f53 | [] | no_license | srazor09/Django_projects | 9806ab25d966af780cdabe652a1792220c7806a8 | 8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93 | refs/heads/master | 2023-04-18T02:13:15.993393 | 2021-05-04T20:34:05 | 2021-05-04T20:34:05 | 364,379,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-05 15:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=32)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
options={
'ordering': ('-created',),
},
),
]
| [
"sourabhaws09@gmail.com"
] | sourabhaws09@gmail.com |
0dbb943458f2c86fae6f65b0ea378179e0299942 | c5200b0cab496328cb5e37f2b3a51d5536ae3458 | /CRUD_App/models.py | 4f6d8deaa62dbe513d37f0aa7bcfbf58580e7e62 | [] | no_license | Touhid7051/Toll-system-Django | b426047719f077cd1534d5a4d12abdc271ba1c6d | d5fbbc1af6e245f3814b95a1a2c5fa44ef5e1260 | refs/heads/main | 2023-01-25T04:19:02.453007 | 2020-12-07T11:37:47 | 2020-12-07T11:37:47 | 315,890,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Student_Admission(models.Model):
name = models.CharField(max_length=200)
father_name = models.CharField(max_length=200)
mother_name = models.CharField(max_length=200)
number = models.IntegerField()
email = models.EmailField(max_length=200)
student_image = models.ImageField(upload_to="Student_Admission")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.name
def image_url(self):
if self.student_image:
return self.student_image.url
else:
return ""
class User_Profile(models.Model):
user = models.OneToOneField(User, null=True ,on_delete=models.CASCADE, blank=True)
name = models.CharField(max_length=200, null=True , blank=True)
phone = models.IntegerField( null=True , blank=True)
email = models.EmailField(max_length=200, null=True , blank=True)
date_created = models.DateTimeField(auto_now_add=True ,null=True ,blank=True)
def __str__(self):
return self.name
| [
"touhidul15-7051@diu.edu.bd"
] | touhidul15-7051@diu.edu.bd |
f56064abeee2161d2d22b4585280c087ffce1296 | 822d3cd484b54f0531fc205520c765a8321c0613 | /pyFile/16/rabbitmq/2一对多默认交换/消费者.py | 427ca4fbba7d68155fc35c5bf5cb55bbfaaec3a0 | [] | no_license | mghxy123/learnPython | 31d1cc18deeed5a89864ca0333fe488e0dbf08b4 | 00740e87d55a4dffd78773deaff8689485df31e8 | refs/heads/master | 2021-07-21T14:31:02.421788 | 2020-06-27T11:28:01 | 2020-06-27T11:28:01 | 187,751,182 | 0 | 0 | null | 2020-06-07T05:14:05 | 2019-05-21T02:58:35 | Python | UTF-8 | Python | false | false | 1,236 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : 消费者.py
# Author: HuXianyong
# Date : 2019/7/28 15:07
import pika
queue_name = 'hello'
params = pika.URLParameters('amqp://hxy:hxy@192.168.18.181:5672/test')
connection = pika.BlockingConnection(params)
channel = connection.channel()
# 队列声明
channel.queue_declare(queue = queue_name) # 声明一个Q,存在就是用,不存在就创建
def call_back(ch,method,properties,body):
print(1,body)
def call_back1(ch,method,properties,body):
print(2,body)
with connection:
# 每一个消费者使用一个basic_consume
channel.basic_consume(queue=queue_name,
auto_ack=True,
on_message_callback=call_back)
## 这里模拟一生产者对二个消费者的情况,可以是一个信道对应连个consume
## 也可以是开启两个进程两个信道对应两个consume
# channel.basic_consume(queue=queue_name,
# auto_ack=True,
# on_message_callback=call_back1)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
# 启动所有消费,直到所有消费都结束,才能退出,它是阻塞的.
| [
"mghxy123@163.com"
] | mghxy123@163.com |
2ec3264972ba16783b405b5a1c54edbefc7bed13 | cdc1705a813eeb17f8f17caff1aeb5b6a6f5e686 | /project/scanner/models.py | d55b823671fa19c5bcbf0a2fa42de49e8e185a31 | [
"MPL-2.0",
"BSD-3-Clause"
] | permissive | mozilla/minion-frontend-old | 13791dc7b0c6cf464a89fb44b002a7e84ff49929 | a9af49f7e7c130820056f9ca4977d59161e5211a | refs/heads/master | 2023-07-03T19:18:50.694581 | 2013-01-31T00:05:23 | 2013-01-31T00:05:23 | 7,925,548 | 0 | 1 | BSD-3-Clause | 2022-01-18T18:55:42 | 2013-01-30T22:43:10 | JavaScript | UTF-8 | Python | false | false | 548 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.contrib.auth.models import User
from django.db import models
class Scan(models.Model):
scan_id = models.CharField(max_length=100, primary_key = True)
scan_creator = models.ForeignKey(User)
scan_date = models.DateTimeField(auto_now_add=True)
scan_url = models.URLField()
scan_plan = models.CharField(max_length=100)
| [
"stefan@arentz.ca"
] | stefan@arentz.ca |
11092818351d13c04af03ec8b765666ea7587db3 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/420/usersdata/314/88743/submittedfiles/exe11.py | a8131def3845324b6d19c1007b1e118b03b47fa0 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # -*- coding: utf-8 -*-
n=8
soma=0
n=int(input('Digite um numero com 8 digitos: '))
while(t==8):
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
76b12f773db2d2363e82999e4bf1956601dd6424 | 27545601844006324ba9126089389fe2cd6aa742 | /Liushui/garbage/analysis.py | 08fd81d7d2f0d56e9c7a318d945d9ac07d736bae | [] | no_license | AllenLiuX/Aitai-Bill-Analysis-with-NN | 8e6d7aef5b6bd43b620c95b7e13db44a634fcdb9 | e69198ed4ce30033481f28fd948dd42780d8c75a | refs/heads/master | 2023-03-13T19:52:07.244617 | 2021-03-01T15:54:38 | 2021-03-01T15:54:38 | 307,239,057 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,694 | py | # -- coding:UTF-8 --
import pandas as pd
import numpy as np
import time
import Modules.mongodb as mongo
import Modules.public_module as md
class Analyzer:
def __init__(self, name):
self.name = name
self.company_name = ''
self.file_paths = []
self.dates = []
self.self_accounts = []
self.path2account = {}
def get_paths(self):
return self.file_paths
def get_infos(self):
forms = mongo.show_datas(self.name, {'type': 'form'}, 'mapping')
if not forms:
return False
for form in forms:
self.file_paths.append(form['path'])
self.dates.append(form['dates'])
self.self_accounts.append(form['account'])
self.path2account[form['path']] = form['account']
self.company_name = forms[0]['company_name']
# make dates from str to int
for d in range(len(self.dates)):
self.dates[d][0] = int(self.dates[d][0])
self.dates[d][1] = int(self.dates[d][1])
print(self.file_paths)
print(self.dates)
print(self.self_accounts)
return True
"""
@:param error_tolerance 是交易后算余额所能容忍的误差值。建议设置大于1
"""
def balance_check(self, error_tolerance, file_path):
cur_df = pd.read_excel(file_path)
invalid = []
income = cur_df['流入金额'].values
out = cur_df['流出金额'].values
balance = cur_df['交易后余额'].values
for i in range(1, len(income)):
if (not np.isnan(income[i])) and income[i] != 0:
if abs(balance[i-1] + income[i] - balance[i]) > error_tolerance:
invalid.append(i)
elif (not np.isnan(out[i])) and out[i] != 0:
if abs(balance[i-1] - out[i] != balance[i]) > error_tolerance:
invalid.append(i)
# else:
# invalid.append(i)
# print(cur_df.loc[invalid]['交易日期'].values[:5])
invalid_dates = cur_df.loc[invalid]['交易日期'].values.tolist() # 提取所有不正确余额对应的日期 <class 'numpy.ndarray'>
print('ratio of invalid balance: ', len(invalid_dates)/len(income))
return invalid_dates
def benford_check(self, file_path):
cur_df = pd.read_excel(file_path)
income = cur_df['流入金额'].values
out = cur_df['流出金额'].values
# balance = cur_df['交易后余额'].values
income2, out2, balance2 = [], [], []
# print(income)
for i in range(len(income)):
if not np.isnan(income[i]):
income2.append(income[i])
if not np.isnan(out[i]):
out2.append(out[i])
all = income2 + out2
res = md.benford(all)
print('benford coefficient: ', res[0])
print('total samples: ', len(all))
return res[0], len(all)
def info_missing_check(self, file_path):
cur_df = pd.read_excel(file_path)
abstract = cur_df['摘要'].values
receiver_name = cur_df['对方名称'].values
abstract_num = 0
receiver_num = 0
for i in range(len(abstract)):
if type(abstract[i]) != str:
abstract_num += 1
if type(receiver_name[i]) != str:
receiver_num += 1
print('缺失的对方名称有:', receiver_num)
print('缺失的摘要有:', abstract_num)
return [abstract_num, receiver_num]
def dates_check(self):
merged_dates = md.merge_dates(self.dates)
print('the merged dates are:', merged_dates)
return merged_dates
def inner_account_check(self):
invalid_accounts = []
for path in self.file_paths:
cur_df = pd.read_excel(path)
accounts = [] # 对方账号
for index in cur_df.index: # 逐行找向自己公司转账的条目,并提取账号
if cur_df.loc[index, '对方名称'] == self.company_name:
cur_account = cur_df.loc[index, '对方账号']
accounts.append(cur_account)
if cur_account not in self.self_accounts:
invalid_accounts.append(cur_account)
print('missing accounts:', invalid_accounts)
return invalid_accounts
def cross_validation(self):
invalid_accounts = []
account2df = {}
# 先把账号下表格都打开
for path in self.file_paths:
cur_df = pd.read_excel(path)
account2df[self.path2account[path]] = cur_df
account2trans = {}
for account in self.self_accounts:
cur_df = account2df[account]
accounts = [] # 对方账号
for index in cur_df.index: # 逐行找向自己公司转账的条目,并提取账号
if cur_df.loc[index, '对方名称'] == self.company_name:
cur_account = cur_df.loc[index, '对方账号']
accounts.append(cur_account)
if cur_account not in self.self_accounts:
invalid_accounts.append(cur_account)
cur_trans = cur_df.loc[index]
if account not in account2trans:
account2trans[account] = [cur_trans]
else:
account2trans[account].append(cur_trans)
unmatched_trans = []
for from_acc, trans in account2trans.items():
for tran in trans:
tran_date = tran.loc['交易日期']
tran_in = tran.loc['流入金额']
tran_out = tran.loc['流出金额']
out_acc = tran.loc['对方账号']
if out_acc in account2df:
to_df = account2df[out_acc]
else:
print('not existed account: ', out_acc)
continue
matched = False
for index in cur_df.index:
if cur_df.loc[index, '对方账号'] == from_acc and cur_df.loc[index, '交易日期'] == tran_date:
if cur_df.loc[index, '流入金额'] == tran_out or cur_df.loc[index, '流出金额'] == tran_in:
print('Get one matched transaction.', from_acc, out_acc)
matched = True
break
if not matched:
print('---- not matched!----\n', tran)
unmatched_trans.append(tran)
# print('missing accounts:', invalid_accounts)
return unmatched_trans
def run(name):
analyst = Analyzer(name)
infostatus = analyst.get_infos()
if not infostatus:
return 'invalid name'
res = {}
print('------ Reports ------')
for path in analyst.get_paths():
cur_info = {}
print('----- '+path+' ------')
cur_info['balence_error_dates'] = analyst.balance_check(0, path)
cur_info['benford'] = analyst.benford_check(path)
infomiss = analyst.info_missing_check(path)
cur_info['abstract_missing'] = infomiss[0]
cur_info['receiver_missing'] = infomiss[1]
res[path] = cur_info
print('----- overall report -----')
res['dates_coverage'] = analyst.dates_check()
res['missing_accounts'] = analyst.inner_account_check()
res['unmatched_trans'] = analyst.cross_validation()
return res
if __name__ == '__main__':
start_time = time.time()
res = run('tongpu')
print(res)
print("--- %s seconds ---" % (time.time() - start_time)) | [
"13120200491@163.com"
] | 13120200491@163.com |
12d1d66a6c71abe3a1b39f8ed964b38b52784cc2 | 88a5d63f9e4843f6a607d8f9f3d95a9972e91253 | /rhos_bootstrap/distribution.py | 07e498ac7eb99ae00ed76bba86a32a2fe335c31a | [
"Apache-2.0"
] | permissive | strider/rhos-bootstrap | a416964cfb5ddc06dd1922406726fed92a180cba | ef3dd6b6ca102219e991acecc6b8476404deb323 | refs/heads/main | 2023-03-30T05:36:01.295870 | 2021-03-31T20:40:49 | 2021-03-31T20:40:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,872 | py | # Copyright 2020 Red Hat, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import subprocess
import yaml
from rhos_bootstrap import constants
from rhos_bootstrap import exceptions
from rhos_bootstrap.utils import repos
from rhos_bootstrap.utils import dnf
from rhos_bootstrap.utils import rhsm
LOG = logging.getLogger(__name__)
class DistributionInfo:
"""Distribution information"""
def __init__(
self,
distro_id: str = None,
distro_version_id: str = None,
distro_name: str = None,
):
"""Distribution Information class"""
_id, _version_id, _name = (None, None, None)
if not distro_id or not distro_version_id or not distro_name:
output = subprocess.Popen(
"source /etc/os-release && " 'echo -e -n "$ID\n$VERSION_ID\n$NAME"',
shell=True,
stdout=subprocess.PIPE,
stderr=open(os.devnull, "w"),
executable="/bin/bash",
universal_newlines=True,
).communicate()
_id, _version_id, _name = output[0].split("\n")
self._distro_id = distro_id or _id
self._distro_version_id = distro_version_id or _version_id
self._distro_name = distro_name or _name
self._is_stream = "stream" in self._distro_name.lower()
self._load_data()
def _load_data(self):
data_path = os.path.join(constants.RHOS_VERSIONS_DIR, f"{self.distro_id}.yaml")
if not os.path.exists(data_path):
LOG.error("%s does not exist", data_path)
raise exceptions.DistroNotSupported(self.distro_id)
with open(data_path, "r") as data:
self._distro_data = yaml.safe_load(data.read())
@property
def distro_data(self):
return self._distro_data
@property
def distro_id(self):
return self._distro_id
@property
def distro_version_id(self):
return self._distro_version_id
@property
def distro_major_version_id(self):
return self._distro_version_id.split(".")[0]
@property
def distro_minor_version_id(self):
if len(self._distro_version_id.split(".")) < 2:
# CentOS Stream doesn't have a minor version
return ""
return self._distro_version_id.split(".")[1]
@property
def is_stream(self):
return self._is_stream
@property
def distro_name(self):
return self._distro_name
@property
def distros(self):
return self._distro_data.get("distros", {})
@property
def versions(self):
return self._distro_data.get("versions", {})
@property
def distro_normalized_id(self):
ver = [
self.distro_id,
self.distro_major_version_id,
]
if self.distro_minor_version_id:
# handle period before minor version if exists
ver.append("." + self.distro_minor_version_id)
if self.is_stream:
ver.append("-stream")
return "".join(ver)
def __str__(self):
return self.distro_normalized_id
def validate_distro(self, version) -> bool:
if version not in self.versions:
LOG.warning(
"%s not in defined in release information",
version,
)
return False
# make sure distro is in the listed distributions
distros = self.versions[version].get("distros", [])
if self.distro_normalized_id not in distros:
LOG.warning(
"%s not in %s",
self.distro_normalized_id,
distros,
)
return False
# make sure subscription manager is at least registered and base os locked
if "rhel" in self.distro_id:
submgr = rhsm.SubscriptionManager.instance()
submgr.status()
_, out, _ = submgr.release()
ver = f"{self.distro_major_version_id}.{self.distro_minor_version_id}"
# The output will be "Release not set" or "Release: X.Y"
if "not set" in out or f": {ver}" not in out:
LOG.warning(
"System not currently locked to the correct release. "
"Please run subscription-manager release --set=%s",
ver,
)
raise exceptions.SubscriptionManagerConfigError()
return True
def get_version(self, version) -> dict:
if version not in self.versions:
LOG.error("%s is not available in version list", version)
raise exceptions.VersionNotSupported(version)
return self.versions.get(version, {})
def construct_repo(self, repo_type, version, name):
# RHEL only supports rhsm
if "rhel" in self.distro_id:
return repos.RhsmRepo(name)
if "centos" in repo_type:
return repos.TripleoCentosRepo(repo_type, name)
if "ceph" in repo_type:
return repos.TripleoCephRepo(self.distro_normalized_id, name)
if "delorean" in repo_type:
dlrn_dist = f"{self.distro_id}{self.distro_major_version_id}"
return repos.TripleoDeloreanRepos(dlrn_dist, version, name)
raise exceptions.RepositoryNotSupported(repo_type)
def get_repos(self, version, enable_ceph: bool = False) -> list:
r = []
dist = self.distro_normalized_id
version_data = self.get_version(version)
if dist not in version_data["repos"]:
LOG.warning("%s missing from version repos", dist)
# handle distro specific repos
for name in version_data["repos"].get(dist, []):
r.append(self.construct_repo(dist, version, name))
# handle other software related repos
for repo in constants.SUPPORTED_REPOS:
for name in version_data["repos"].get(repo, []):
if not enable_ceph and "ceph" in name:
continue
r.append(self.construct_repo(dist, version, name))
return r
def get_modules(self, version) -> list:
r = []
module_data = self.get_version(version).get("modules", {})
for item in module_data.items():
r.append(dnf.DnfModule(*item))
return r
| [
"aschultz@redhat.com"
] | aschultz@redhat.com |
ffd1407a0f604f8cb4df10becc9e15b94189fb82 | 19b0bae543c1effc1bca1605aefe41f4903ed401 | /python/l1TRun3Ntuplizer_cfi.py | 3b9450f6004c19b729f057a7c955c9acc0147cfb | [] | no_license | skkwan/boostedTauRun3Ntuplizer | 2bd88b3a943509c1cccc5b93ad2d968e34f33b3a | d7bb8445b6847aaa2a0f17e9c432e91b066f8a08 | refs/heads/main | 2023-03-14T09:14:22.748535 | 2021-03-04T21:38:08 | 2021-03-04T21:38:08 | 344,615,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | import FWCore.ParameterSet.Config as cms
l1NtupleProducer = cms.EDAnalyzer("Run3Ntuplizer",
ecalDigis = cms.InputTag( 'l1tCaloLayer1Digis'),
hcalDigis = cms.InputTag( 'l1tCaloLayer1Digis'),
recoJets = cms.InputTag("slimmedJets"),
recoJetsAK8 = cms.InputTag("slimmedJetsAK8"),
miniTaus = cms.InputTag("slimmedTaus"),
genParticles = cms.InputTag("genParticles", "", "HLT"),
recoPtCut = cms.double(10),
UCTRegion = cms.InputTag('uct2016EmulatorDigis'),
l1UCTCentralJets = cms.InputTag("uct2016EmulatorDigis","Central"),
l1UCTForwardJets = cms.InputTag("uct2016EmulatorDigis","Forward"),
stage2Taus = cms.InputTag("l1extraParticles","Tau"),
stage2IsoTaus = cms.InputTag("l1extraParticles","IsoTau"),
stage2DigisTaus = cms.InputTag("caloStage2Digis", "Tau"),
gmtStage2Digis = cms.InputTag("simGmtStage2Digis"),
genJets = cms.InputTag("slimmedGenJets"),
isData = cms.bool(True),
folderName = cms.untracked.string("Stage3Regions")
)
#BXVector<l1t::Muon> "simGmtStage2Digis" "" "HLT"
| [
"ojalvo@wisc.edu"
] | ojalvo@wisc.edu |
cb7cdcc214b82450770ba2d1d88182e1fdbc1783 | a8494a11352d221d6cf2108bf3fb1eca6e42c6ec | /source/samples/lex-lambdas/pizza_order/test_pizza_order_handler.py | e90a32d38c49bd865e6d5e2cc2f46e6e6042d67e | [
"Apache-2.0",
"ISC",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kevinschwarz/serverless-bot-framework | c5fbbe0805d55d3a8e2f17fb1a83adf103c1852e | 4b063418ed60a7d3ea7d8f81ed8cd040c43123c7 | refs/heads/main | 2023-04-20T16:54:50.937023 | 2021-05-12T13:27:55 | 2021-05-12T13:27:55 | 366,723,404 | 0 | 0 | NOASSERTION | 2021-05-12T13:22:56 | 2021-05-12T13:22:56 | null | UTF-8 | Python | false | false | 9,620 | py | ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
import logging
from unittest import TestCase
from unittest.mock import Mock, patch
logger = logging.getLogger()
logger.setLevel(logging.INFO)
mock_env_variables = {
"PIZZA_ORDERS_TABLE": "test_table",
"PIZZA_ORDERS_INDEX": "test_index",
"AWS_SDK_USER_AGENT": '{ "user_agent_extra": "AwsSolution/1234/1.6.0" }',
}
@patch.dict(os.environ, mock_env_variables)
class PizzaOrderHandlerTests(TestCase):
@patch("pizza_order.pizza_order_handler.place_order")
@patch(
"pizza_order.pizza_order_handler.get_cancel_message",
return_value="mock cancel message",
)
@patch(
"pizza_order.pizza_order_handler.empty_slot_values",
return_value="mock slot values",
)
@patch(
"pizza_order.pizza_order_handler.get_menu_message",
return_value="mock menu message",
)
@patch(
"pizza_order.pizza_order_handler.get_confirmation_message",
return_value="mock confirmation",
)
@patch("pizza_order.pizza_order_handler.respond")
def test_handle_full_slots(
self,
mock_respond,
mock_get_confirmation_message,
mock_get_menu_message,
mock_empty_slot_values,
mock_get_cancel_message,
mock_place_order,
):
from pizza_order.pizza_order_handler import handle_full_slots
intent_request = {
"sessionState": {
"intent": {
"confirmationState": "None",
"slots": {
"type": {"value": {"resolvedValues": ["testtype"]}},
"size": {"value": {"resolvedValues": ["testsize"]}},
"crust": {"value": {"resolvedValues": ["testcrust"]}},
"count": {"value": {"resolvedValues": ["testcount"]}},
},
}
},
"sessionId": "testid",
}
locale_id = "en_US"
order_id = "1234-1234-1234-1234"
# when confirmation_state is None
handle_full_slots(intent_request, locale_id, order_id)
mock_respond.assert_called_with(
intent_request,
message="mock confirmation",
dialog_action_type="ConfirmIntent",
fulfillment_state="InProgress",
)
# when confirmation_state is Denied with active context
intent_request["sessionState"]["intent"]["confirmationState"] = "Denied"
intent_request["sessionState"]["activeContexts"] = ["testContext"]
handle_full_slots(intent_request, locale_id, order_id)
mock_respond.assert_called_with(
intent_request,
message="mock menu message",
fulfillment_state="InProgress",
dialog_action_type="ElicitSlot",
slot_to_elicit="type",
slots="mock slot values",
)
# confirmation_state = Denied without active context
intent_request["sessionState"]["activeContexts"] = []
handle_full_slots(intent_request, locale_id, order_id)
mock_respond.assert_called_with(
intent_request,
message="mock cancel message",
fulfillment_state="Failed",
dialog_action_type="Close",
)
# confirmation_state = Confirmed
intent_request["sessionState"]["intent"]["confirmationState"] = "Confirmed"
handle_full_slots(intent_request, locale_id, order_id)
mock_place_order.assert_called_with(intent_request, order_id, logger)
@patch(
"pizza_order.pizza_order_handler.get_menu_message",
return_value="mock menu message",
)
@patch(
"pizza_order.pizza_order_handler.update_slot_values",
return_value="mock slot values",
)
@patch(
"pizza_order.pizza_order_handler.get_repeat_message",
return_value="mock repeat message",
)
@patch("pizza_order.pizza_order_handler.check_last_order")
@patch("pizza_order.pizza_order_handler.respond")
def test_handle_emtpy_slots(
self,
mock_respond,
mock_check_last_order,
mock_get_repeat_message,
mock_update_slot_values,
mock_get_menu_message,
):
from pizza_order.pizza_order_handler import handle_empty_slots
intent_request = {}
locale_id = "en_US"
active_context = {
"name": "repeatOrder",
"contextAttributes": {"repeatLastOrder": "Pending"},
"timeToLive": {"turnsToLive": 2, "timeToLiveInSeconds": 300},
}
# last order exists
mock_check_last_order.return_value = (True, 'mock last order')
handle_empty_slots(intent_request, locale_id)
mock_respond.assert_called_with(
intent_request,
message="mock repeat message",
fulfillment_state="InProgress",
dialog_action_type="ConfirmIntent",
slots="mock slot values",
active_context=active_context,
)
# last order does not exist
mock_check_last_order.return_value = (False, 'mock last order')
handle_empty_slots(intent_request, locale_id)
mock_respond.assert_called_with(
intent_request,
message="mock menu message",
fulfillment_state="InProgress",
dialog_action_type="ElicitSlot",
slot_to_elicit="type",
)
@patch("pizza_order.pizza_order_handler.handle_empty_slots")
@patch("pizza_order.pizza_order_handler.empty_slots")
@patch("pizza_order.pizza_order_handler.handle_full_slots")
@patch("pizza_order.pizza_order_handler.full_slots")
@patch("pizza_order.pizza_order_handler.respond")
def test_handle_repeat_order(
self,
mock_respond,
mock_full_slots,
mock_handle_full_slots,
mock_empty_slots,
mock_handle_empty_slots,
):
from pizza_order.pizza_order_handler import handle_repeat_order
intent_request = {"bot": {"localeId": "en_US"}}
order_id = "1234"
# All slot values are resolved
mock_full_slots.return_value = True
handle_repeat_order(intent_request, order_id)
mock_handle_full_slots.assert_called_with(intent_request, 'en_US', order_id)
# All slot values are empty - beginning of dialog
mock_full_slots.return_value = False
mock_empty_slots.return_value = True
handle_repeat_order(intent_request, order_id)
mock_handle_empty_slots.assert_called_with(intent_request, 'en_US')
# Slot values are neither full nor empty
mock_full_slots.return_value = False
mock_empty_slots.return_value = False
handle_repeat_order(intent_request, order_id)
mock_respond.assert_called_with(
intent_request,
message="",
fulfillment_state="InProgress",
dialog_action_type="Delegate",
)
@patch("pizza_order.pizza_order_handler.place_order")
@patch("pizza_order.pizza_order_handler.handle_repeat_order")
@patch("pizza_order.pizza_order_handler.generate_order_id", return_value="1234")
@patch("pizza_order.pizza_order_handler.logger")
def test_handle_pizza_order(
self,
mock_logger,
mock_generate_order_id,
mock_handle_repeat_order,
mock_place_order,
):
from pizza_order.pizza_order_handler import handle_pizza_order
intent_request = {"sessionState": {"intent": {"state": "InProgress"}}}
handle_pizza_order(intent_request)
mock_generate_order_id.assert_called()
# intent state is InProgress
mock_handle_repeat_order.assert_called_with(intent_request, "1234")
# intent state is ReadyForFulfillment
intent_request["sessionState"]["intent"]["state"] = "ReadyForFulfillment"
handle_pizza_order(intent_request)
mock_place_order.assert_called_with(intent_request, "1234", mock_logger)
# intent state is neither of the above cases
intent_request["sessionState"]["intent"]["state"] = "Failed"
handle_pizza_order(intent_request)
mock_logger.error.assert_called_with(intent_request)
| [
"ssdzd@amazon.com"
] | ssdzd@amazon.com |
48389b6fdbf2094b93689fa804fe01a7d8ab1541 | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/joblib/externals/cloudpickle/compat.py | 5c9b64fa44e2b5c465aaace68f869fedec8d04c4 | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | import sys
if sys.version_info < (3, 8):
try:
import pickle5 as pickle # noqa: F401
from pickle5 import Pickler # noqa: F401
except ImportError:
import pickle # noqa: F401
from pickle import _Pickler as Pickler # noqa: F401
else:
import pickle # noqa: F401
from _pickle import Pickler # noqa: F401
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
18f50591e6581dce17758291e3886582688d69b8 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-8656.py | 9c2792f6c49f56f54f694eded678bef843115df5 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,751 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers (variant with duplicated "2" methods whose
# extra parameters are accepted but ignored)
class Vector2(object):
    items: [int] = None   # backing storage; len(items) == capacity
    items2: [int] = None  # unused padding field
    size: int = 0         # number of elements actually in use
    size2: int = 0        # unused padding field

    def __init__(self:"Vector2"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector.
    # Fix: an out-of-range idx (>= size) previously still decremented size,
    # silently dropping the last element. Ignore such indices.
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored).
    # Same out-of-range guard as remove_at.
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index (idx must be in [0, size))
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers (variant with duplicated "2"/"3" methods
# whose extra parameters are accepted but ignored)
class Vector3(object):
    items: [int] = None   # backing storage; len(items) == capacity
    items2: [int] = None  # unused padding field
    items3: [int] = None  # unused padding field
    size: int = 0         # number of elements actually in use
    size2: int = 0        # unused padding field
    size3: int = 0        # unused padding field

    def __init__(self:"Vector3"):
        self.items = [0]

    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Returns current capacity
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 is ignored)
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends one item to end of vector (item2 and item3 are ignored)
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2 is ignored)
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)

    # Appends many items to end of vector (new_items2/new_items3 are ignored)
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)

    # Removes an item from the middle of vector.
    # Fix: an out-of-range idx (>= size) previously still decremented size,
    # silently dropping the last element. Ignore such indices.
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2 is ignored).
    # Same out-of-range guard as remove_at.
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Removes an item from the middle of vector (idx2/idx3 are ignored).
    # Same out-of-range guard as remove_at.
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0 or idx >= self.size:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1

    # Retrieves an item at a given index (idx must be in [0, size))
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2 is ignored)
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Retrieves an item at a given index (idx2/idx3 are ignored)
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length2(self:"Vector3") -> int:
        return self.size

    # Retrieves the current size of the vector
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
    # Only `items`/`size` back the vector; the numbered twins are never
    # touched by any method below.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0

    def __init__(self:"Vector4"):
        self.items = [0]

    # Number of allocated slots
    def capacity(self:"Vector4") -> int: return len(self.items)

    # Number of allocated slots
    def capacity2(self:"Vector4") -> int: return len(self.items)

    # Number of allocated slots
    def capacity3(self:"Vector4") -> int: return len(self.items)

    # Number of allocated slots
    def capacity4(self:"Vector4") -> int: return len(self.items)

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one item after the last element, growing if full
    def append(self:"Vector4", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Append every element of new_items in order
    def append_all(self:"Vector4", new_items: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Element stored at position idx
    def get(self:"Vector4", idx: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get2(self:"Vector4", idx: int, idx2: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int: return self.items[idx]

    # Number of stored elements
    def length(self:"Vector4") -> int: return self.size

    # Number of stored elements
    def length2(self:"Vector4") -> int: return self.size

    # Number of stored elements
    def length3(self:"Vector4") -> int: return self.size

    # Number of stored elements
    def length4(self:"Vector4") -> int: return self.size
# A resizable list of integers
class Vector5(object):
    # Only `items`/`size` back the vector; the numbered twins are never
    # touched by any method below.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0

    def __init__(self:"Vector5"):
        self.items = [0]

    # Number of allocated slots
    def capacity(self:"Vector5") -> int: return len(self.items)

    # Number of allocated slots
    def capacity2(self:"Vector5") -> int: return len(self.items)

    # Number of allocated slots
    def capacity3(self:"Vector5") -> int: return len(self.items)

    # Number of allocated slots
    def capacity4(self:"Vector5") -> int: return len(self.items)

    # Number of allocated slots
    def capacity5(self:"Vector5") -> int: return len(self.items)

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Grow the backing storage by one slot and report the new capacity
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()

    # Store one item after the last element, growing if full
    def append(self:"Vector5", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Store one item after the last element, growing if full (extras ignored)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size += 1

    # Append every element of new_items in order
    def append_all(self:"Vector5", new_items: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Append every element of new_items in order (extras ignored)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        for value in new_items:
            self.append(value)

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Shift the tail left over position idx; negative indices are a no-op
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        for pos in range(idx, self.size - 1):
            self.items[pos] = self.items[pos + 1]
        self.size -= 1

    # Element stored at position idx
    def get(self:"Vector5", idx: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get2(self:"Vector5", idx: int, idx2: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int: return self.items[idx]

    # Element stored at position idx (extras ignored)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int: return self.items[idx]

    # Number of stored elements
    def length(self:"Vector5") -> int: return self.size

    # Number of stored elements
    def length2(self:"Vector5") -> int: return self.size

    # Number of stored elements
    def length3(self:"Vector5") -> int: return self.size

    # Number of stored elements
    def length4(self:"Vector5") -> int: return self.size

    # Number of stored elements
    def length5(self:"Vector5") -> int: return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000

    # Overridden growth policy: double the storage while small, then fall
    # back to one-slot increases once half the doubling limit is reached.
    def increase_capacity(self:"DoublingVector") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000

    # Overridden growth policy: double the storage while small, then fall
    # back to one-slot increases once half the doubling limit is reached.
    def increase_capacity(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity2(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000

    # Overridden growth policy: double the storage while small, then fall
    # back to one-slot increases once half the doubling limit is reached.
    def increase_capacity(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity2(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity3(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000

    # Overridden growth policy: double the storage while small, then fall
    # back to one-slot increases once half the doubling limit is reached.
    def increase_capacity(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity2(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity3(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity4(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000

    # Overridden growth policy: double the storage while small, then fall
    # back to one-slot increases once half the doubling limit is reached.
    def increase_capacity(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity2(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity3(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity4(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()

    # Duplicate of increase_capacity with the same doubling policy
    def increase_capacity5(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# Makes a vector holding the integers in [i, j)
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Makes a vector holding the integers in [i, j); the extra bounds are unused
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Makes a vector holding the integers in [i, j); the extra bounds are unused
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Makes a vector holding the integers in [i, j); the extra bounds are unused
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Makes a vector holding the integers in [i, j); the extra bounds are unused
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    for value in range(i, j):
        v.append(value)
    return v
# Sieve of Eratosthenes (not really): for each surviving element, delete
# every later element it divides, leaving only values with no smaller
# divisor in the vector.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            # BUG FIX: this line previously contained the unexpanded
            # template placeholder `$Exp`, a syntax error; sieve2..sieve5
            # show the intended divisibility test.
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same sieve as above; v2 is accepted but never used
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as above; the extra vectors are accepted but never used
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as above; the extra vectors are accepted but never used
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Same sieve as above; the extra vectors are accepted but never used
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k != 0:
                j = j + 1
            else:
                v.remove_at(j)
        i = i + 1
# Input parameter
# Exclusive upper bound of the candidate range; only `n` is consumed below,
# n2..n5 are unused filler.
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
# Only v and i are actually used; the numbered twins mirror the generated
# filler pattern of the classes above.
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
# Build the range [2, n) five times, but only sieve and print v.
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
# One surviving value per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
7301a96c2656ea14b65a7f7512ff81e1ffb811d8 | 92d8923498758abe2bca693bbffb9ea513bc921d | /startup-flask.py | 74e26f6146ebb183d6f2272ebdcd2da92eebea1d | [] | no_license | ayseth/startup-google | 8fc1953abcd24e24e13c228adeafcfc43dc7177f | d0dc39df472c2545121d5e3493ba4bcf6c5d1745 | refs/heads/master | 2020-04-09T02:35:24.458821 | 2018-12-01T13:20:52 | 2018-12-01T13:20:52 | 159,946,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,036 | py | from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from startup_setup import Startup, Base, Founder, User
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
app = Flask(__name__)
# OAuth2 client id taken from the credentials file downloaded from the
# Google API console.
CLIENT_ID = json.loads(open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = 'Startup Application'
# SQLAlchemy wiring: bind the declarative models to the SQLite database and
# open a single module-wide session shared by all request handlers.
engine = create_engine('sqlite:///startup.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/login')
def showLogin():
    # Issue a 32-character anti-forgery state token and render the
    # sign-in page with it.
    alphabet = string.ascii_uppercase + string.digits
    state = ''.join(random.choice(alphabet) for _ in xrange(32))
    login_session['state'] = state
    return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'
), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = \
make_response(json.dumps('''Failed to upgrade the
authorization code.'''), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = \
'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' \
% access_token
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = \
make_response(json.dumps('''Token's user ID doesn't
match given user ID.'''), 401)
response.headers['Content-Type'] = 'application/json'
return response
if result['issued_to'] != CLIENT_ID:
response = \
make_response(json.dumps('''Token's client ID does not
match app's.'''), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = \
make_response(json.dumps('''Current user is
already connected.'''), 200)
response.headers['Content-Type'] = 'application/json'
login_session['provider'] = 'google'
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += \
''' " style = "width: 300px;
height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '''
flash('you are now logged in as %s' % login_session['username'])
print 'done!'
return output
def createUser(login_session):
    # Persist a new User built from the login session, then return the id
    # of the freshly stored row (looked up again by its email).
    newUser = User(name=login_session['username'],
                   email=login_session['email'],
                   picture=login_session['picture'])
    session.add(newUser)
    session.commit()
    stored = session.query(User).filter_by(email=login_session['email']).one()
    return stored.id
def getUserInfo(user_id):
    # Fetch the User row with the given primary key.
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Return the id of the user with this email, or None if not found."""
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; .one() raises when no (or more than
        # one) row matches.
        return None
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the stored Google access token and clear the login session."""
    access_token = login_session.get('access_token')
    if access_token is None:
        response = \
            make_response(json.dumps('Current user not connected.'),
                          401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Ask Google to revoke the token.
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \
        % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        # Drop every credential cached for this user.
        del login_session['gplus_id']
        del login_session['access_token']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        response = redirect(url_for('showstartups'))
        flash('You are now logged out.')
        return response
    else:
        # BUG FIX: 400 was previously passed to json.dumps (as its
        # `skipkeys` argument) instead of to make_response, so the failure
        # was sent with the default 200 status.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
@app.route('/')
@app.route("/startups")
def showstartups():
    # List all startups; the public template hides edit controls from
    # anonymous visitors.
    startups = session.query(Startup).all()
    if 'username' in login_session:
        return render_template('startup.html', startups=startups)
    return render_template('publicstartup.html', startups=startups)
@app.route("/startups/<int:startup_id>/founders", methods=['GET', 'POST'])
def showfounder(startup_id):
startup_1 = session.query(Startup).filter_by(id=startup_id).one()
details = session.query(Founder).filter_by(startup_id=startup_id).all()
creator = getUserInfo(startup_1.user_id)
if 'username' not in login_session or creator.id != login_session['user_id']:
return render_template('publicfounders.html', startup_1=startup_1, details=details, creator=creator)
else:
if request.method == 'POST':
newsfounder = Founder(name=request.form['name'], bio=request.form['bio'], startup_id=startup_id, user_id=login_session['user_id'])
session.add(newsfounder)
session.commit()
flash("Founder Added successfully")
return redirect(url_for('showfounder', startup_id=startup_id))
else:
return render_template('founders.html', startup_1=startup_1, details=details, creator=creator)
# return "This page will show founders"
@app.route("/startups/<int:founder_id>/edit/founder", methods=['GET', 'POST'])
def editfounder(founder_id):
editfounder = session.query(Founder).filter_by(id=founder_id).one()
if 'username' not in login_session:
return redirect('/login')
if editfounder.user_id != login_session['user_id']:
return redirect(url_for('showfounder', startup_id=editfounder.startup_id))
if request.method == 'POST':
if request.form['name']:
editfounder.name = request.form['name']
if request.form['bio']:
editfounder.bio = request.form['bio']
session.add(editfounder)
session.commit()
flash("Founder Edited successfully")
return redirect(url_for('showfounder', startup_id=editfounder.startup_id))
else:
return render_template('editfounder.html', edit=editfounder)
@app.route("/startups/<int:founder_id>/delete/founder", methods=['GET', 'POST'])
def deletefounder(founder_id):
deletefounder = session.query(Founder).filter_by(id=founder_id).one()
if 'username' not in login_session:
return redirect('/login')
if deletefounder.user_id != login_session['user_id']:
return redirect(url_for('showfounder', startup_id=deletefounder.startup_id))
if request.method == 'POST':
session.delete(deletefounder)
session.commit()
flash("Founder Deleted successfully")
return redirect(url_for('showfounder', startup_id=deletefounder.startup_id))
else:
return render_template('deletefounder.html', delete=deletefounder)
@app.route("/startups/new", methods=['GET', 'POST'])
def newstartup():
if 'username' not in login_session:
return redirect('/login')
if request.method == 'POST':
newstartup = Startup(name=request.form['name'], user_id=login_session['user_id'])
session.add(newstartup)
session.commit()
flash("Startup Added successfully")
return redirect(url_for('showstartups'))
else:
return render_template('newstartup.html')
# return "This page will cretae a new startup"
@app.route("/startups/<int:startupid>/edit", methods=['GET', 'POST'])
def editstartup(startupid):
editedstartup = session.query(Startup).filter_by(id=startupid).one()
if 'username' not in login_session:
return redirect('/login')
if editedstartup.user_id != login_session['user_id']:
return redirect(url_for('showfounder', startup_id=startupid))
if request.method == 'POST':
if request.form['name']:
editedstartup.name = request.form['name']
session.add(editedstartup)
session.commit()
flash("Startup Edited successfully")
return redirect(url_for('showfounder', startup_id=startupid))
else:
return render_template('editstartup.html', edit=editedstartup)
# return "This page is used to edit startup id"
@app.route("/startups/<int:startup_id>/delete", methods=['GET', 'POST'])
def deletestartup(startup_id):
delstartup = session.query(Startup).filter_by(id=startup_id).one()
if 'username' not in login_session:
return redirect('/login')
if restaurantToDelete.user_id != login_session['user_id']:
return redirect(url_for('showstartups'))
if request.method == 'POST':
session.delete(delstartup)
session.commit()
flash("Startup Deleted successfully")
return redirect(url_for('showstartups'))
else:
return render_template('deletestartup.html', delstartup=delstartup)
# return "This page is used to delete startup id"
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key; load from config/env for any
    # non-development deployment.
    app.secret_key = 'super_secret_key'
    app.debug = True
    # 0.0.0.0 exposes the dev server on all network interfaces of the host.
    app.run(host='0.0.0.0', port=5000)
| [
"vagrant@vagrant.vm"
] | vagrant@vagrant.vm |
589542efedef49898e64d4db77cf1f9299ac88b1 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/159f_2.py | 049a6b840d367d62419a04a1c193478f8eb82049 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import numpy as np
N, S = map(int, input().split())
A = list(map(int, input().split()))
MOD = 998244353
ans = 0
dp = np.zeros(S + 1, dtype=np.int64)
for a in A:
dp[0] += 1
dp[a:] += dp[: -a].copy()
dp %= MOD
ans += dp[S]
print(ans % MOD)
| [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
faecb62da3b6501321d2d5f7f697bfab19b9eac1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02632/s582472876.py | 7e39caf5135fbed12b3d6f7935c28c146a39507d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from sys import stdin
# Fast input: rebind input() to stdin.readline for speed.
input = stdin.readline
def nCr(n,r):
    """Binomial coefficient C(n, r) modulo M, via precomputed factorials
    and inverse factorials (fact/inv are module-level tables)."""
    return fact[n]*(inv[n-r]%M)*(inv[r]%M)
M = 1000000007
k = int(input())
s = len(input().strip())
t = k+s
# fact[i] = i! mod M; p25[i] = 25^i mod M (only needed up to k).
fact = [1]
p25 = [1]
for i in range(1,t+1):
    fact += [(fact[-1] * i)%M]
    if i<=k:
        p25 += [(p25[-1] * 25) % M]
# inv[i] = (i!)^{-1} mod M, built backwards from Fermat's little theorem.
inv = [1]*(t+1)
inv[t] = pow(fact[t], M-2, M)
for i in range(t-1, -1, -1):
    inv[i] = inv[i+1] * (i+1) % M
# Sum over i of C(t, t-i) * 25^i  (counting strings containing the given
# string as a subsequence -- AtCoder ABC171-style counting).
res = 0
for i in range(k+1):
    res+=nCr(t,t-i)*p25[i]
    res%=M
print(res) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d10d4cb19fb71334d599eb552493f71b6dfcfe64 | 1714aec212ce8132239dd94d47f1b5dee6986d2b | /nutrition/handlers/base.py | 4ec694f354a11914916a6824695dd95d54f95a12 | [
"BSD-3-Clause"
] | permissive | caktus/rapidsms-nutrition | 7030c45638f1931ff60f5ad8ea439fe48948bc30 | b8cbce5bf0a8d9b0f725bf0ec4d7a8e46e5f5be5 | refs/heads/master | 2020-06-04T05:57:10.561254 | 2013-04-03T13:43:17 | 2013-04-04T08:33:08 | 8,168,464 | 2 | 1 | null | 2013-04-04T13:54:39 | 2013-02-12T22:07:20 | Python | UTF-8 | Python | false | false | 3,498 | py | from __future__ import unicode_literals
import logging
import re
from django.utils.translation import ugettext_lazy as _
__all__ = ['NutritionHandlerBase']
logger = logging.getLogger(__name__)
class NutritionHandlerBase(object):
    """Common base class for Nutrition keyword handlers.

    Subclasses set ``keyword`` and ``form_class``, supply handler-specific
    ``_messages``, and implement ``_parse`` and ``_process``.
    """

    prefix = 'nutrition'  # Common prefix for all Nutrition messages.
    keyword = None  # Pipe-separated keyword alternatives, e.g. 'report|r'.
    form_class = None  # Form used to process data.

    _common_messages = {  # Messages common to most or all Nutrition handlers.
        'form_error': _('Sorry, an error occurred while processing your '
                        'message: {message}'),
        'error': _('Sorry, an unexpected error occurred while processing your '
                   'message. Please contact your administrator if this '
                   'continues to occur.'),
    }
    _messages = {}  # Handler-specific messages.

    @classmethod
    def _colloquial_keyword(cls):
        """If the class has multiple keyword choices, return the first."""
        return cls.keyword.split('|')[0]

    def _get_form(self, data):
        # Instantiate the handler's form with the raw text and connection
        # captured in handle().
        return self.form_class(data, raw_text=self.raw_text,
                connection=self.connection)

    @classmethod
    def _keyword(cls):
        """Override the KeywordHandler method to also require prefix."""
        args = (cls.prefix, cls.keyword)
        pattern = r'^\s*(?:%s)\s*(?:%s)(?:[\s,;:]+(.+))?$' % args
        return re.compile(pattern, re.IGNORECASE)

    def _parse(self, raw_text):
        """Tokenize message text and return parsed data.

        Raises ValueError if the message cannot be parsed.
        """
        # BUG FIX: the original did `raise NotImplemented(...)`, which
        # raises TypeError because the NotImplemented singleton is not
        # callable; NotImplementedError is the correct exception.
        raise NotImplementedError('Subclass must define _parse method.')

    def _process(self, parsed):
        """Validate and act upon parsed message data."""
        # BUG FIX: same NotImplemented -> NotImplementedError correction.
        raise NotImplementedError('Subclass must define _process method.')

    def _respond(self, msg_type, **kwargs):
        """Shortcut to retrieve and format a message."""
        data = {  # Some common data.
            'prefix': self.prefix.upper(),
            'keyword': self._colloquial_keyword().upper(),
        }
        data.update(**kwargs)
        # Handler-specific messages take precedence over the common ones.
        if msg_type in self._messages:
            return self.respond(self._messages[msg_type].format(**data))
        if msg_type in self._common_messages:
            return self.respond(self._common_messages[msg_type].format(**data))
        raise KeyError('Message type {0} not found.'.format(msg_type))

    def handle(self, text):
        """
        Entry point of the handler. This method takes care of a few common
        tasks then calls the subclass-specific process method.
        """
        self.raw_text = self.msg.text
        # The reporter will be determined from the message connection.
        self.connection = self.msg.connection
        logger.debug('Received {keyword} message from {connection}.'.format(
                keyword=self._colloquial_keyword(), connection=self.connection))
        # Parse the message into its components.
        try:
            parsed = self._parse(text)
        except ValueError:
            logger.exception('An exception occurred while parsing the message')
            self._respond('format_error')
            return
        else:
            data = ', '.join([': '.join((k, v)) for k, v in parsed.items()])
            logger.debug('Parsed {keyword} data: {data}'.format(
                    keyword=self._colloquial_keyword(), data=data))
        self._process(parsed)  # Subclasses must process parsed data.

    def help(self):
        self._respond('help')
| [
"rebecca@caktusgroup.com"
] | rebecca@caktusgroup.com |
8bf454ff2505eacbbc560edcb0a50c187edc4223 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /load_balancer_fuzzer_mcs/interreplay_51_l_5/replay_config.py | 0fe6e986e1ae1294d9acaed7ea131f16807b2ac6 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import Replayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose --unthreaded-sh misc.ip_loadbalancer --ip=123.123.1.3 --servers=123.123.2.3,123.123.1.3 sts.util.socket_mux.pox_monkeypatcher openflow.discovery openflow.of_01 --address=__address__ --port=__port__', label='c1', address='127.0.0.1', cwd='dart_pox')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=True,
kill_controllers_on_exit=True)
control_flow = Replayer(simulation_config, "experiments/load_balancer_fuzzer_mcs/interreplay_51_l_5/events.trace",
input_logger=InputLogger(),
wait_on_deterministic_values=False,
allow_unexpected_messages=False,
delay_flow_mods=False,
default_dp_permit=False,
pass_through_whitelisted_messages=False,
invariant_check_name='check_for_ofp_error',
bug_signature="ERROR_SENT")
| [
"jefflai2@gmail.com"
] | jefflai2@gmail.com |
2ed0662fb580440d9985f67a8cf23f795d1d85a2 | 5a5e0a01efa6ef0961992e53bb4f64840f93150b | /RegressionVisualizer/RegressionVisualizer/settings.py | 760cbb64ec65faed0bc8c07e525ddb8b3182923f | [] | no_license | scotteskridge/RegressionApp | ed059e3205ab54061129779404345b55c0dee75c | 68932a9c94235a1e8bd6cd71a765b545f2266189 | refs/heads/master | 2021-01-19T20:48:13.495541 | 2017-04-25T02:39:49 | 2017-04-25T02:39:56 | 88,555,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,330 | py | """
Django settings for RegressionVisualizer project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control; load it from an
# environment variable before deploying.
SECRET_KEY = 'ez!hio8_m#71hdjl@#4efwg1(zgy!n7qtmk(ctst)9a$7ae+f='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', u'localhost']
# Application definition
INSTALLED_APPS = [
    'apps.regressions',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'jquery',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'RegressionVisualizer.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'RegressionVisualizer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Development default: file-backed SQLite alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)
# collectstatic target, outside the project directory.
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn")
"scott.eskridge@gmail.com"
] | scott.eskridge@gmail.com |
2ae3d9db09e1f7591f0c08d8fc03e8fc17f6af93 | c386fd8f8377990f13c8059154eb9ef473534355 | /scripts/srl_parse.py | 27e62e85d3f08aa34e2bdcaa719f63d391fc9f2c | [] | no_license | KOPFYF/SatiricalNews | 9ef9601821b872cc69b8319910cba1e99f778270 | 374ae8cc4a1df936f8f950c33cb694bb42c5f2e9 | refs/heads/master | 2021-07-12T04:05:56.733080 | 2020-06-27T03:48:08 | 2020-06-27T03:48:08 | 148,247,657 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,662 | py | import os
import time
import re
import spacy
nlp = spacy.load('en_core_web_sm')
from graph import *
from allennlp.predictors import Predictor
import data_utilities as du
import arg_parser as ap
import collections
import argparse
import utilities as util
import pdb
from pickle import load, dump
# Load the AllenNLP SRL and coreference models, preferring a local cache
# under ../pretrained and falling back to downloading from S3.
if os.path.exists('../pretrained'):
    print('models already downloaded')
    srl_predictor = Predictor.from_path('../pretrained/srl-model-2018.05.25.tar.gz')
    coref_predictor = Predictor.from_path('../pretrained/coref-model-2018.02.05.tar.gz')
else:
    print('downloading models...')
    srl_predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
    coref_predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/coref-model-2018.02.05.tar.gz")
# load data
# args = ap.get_args()
# docs = []
# docs += du.load_sent('../datasets/bbcnews.txt')
# word_dict = util.build_dict(docs)
# inv_dict = util.build_inv_dict(word_dict)
# # print('word_dict:', word_dict)
# # print('inv_dict:', inv_dict)
# word_embed = util.words2embedding(word_dict, 100, args.embedding_file)
# doc = ' '.join(docs)
def load_data(args):
    """Load the news corpus and initialize the module-level vocabulary.

    Side effects: sets the globals `word_dict` and `word_embed`, and
    pickles the word dict to ../datasets/word_dict.
    Returns the whole corpus joined into a single string.
    """
    global word_dict, word_embed
    docs = []
    docs += du.load_sent('../datasets/bbcnews.txt') # BBC_news
    # docs += du.load_sent('../datasets/BBC_news.txt')
    word_dict = util.build_dict(docs)
    # inv_dict = util.build_inv_dict(word_dict)
    # 100 is the embedding dimension used throughout this script.
    word_embed = util.words2embedding(word_dict, 100, args.embedding_file)
    print('word_dict:', word_dict)
    with open('../datasets/word_dict', 'wb') as fid:
        dump(word_dict, fid)
    doc = ' '.join(docs)
    return doc
def get_srl(sent):
    """Run SRL on one sentence and pick the "best" verb frame.

    The frame with the fewest 'O' (outside) tags is selected, i.e. the one
    covering the most of the sentence.  Returns that verb dict when its
    description contains an ARG span; returns False when the model found
    no verbs.  NOTE(review): when the best frame has no 'ARG' this falls
    through and implicitly returns None -- callers appear to rely only on
    truthiness, but confirm.
    """
    # use pretrained AllenNLP model
    # results_srl = srl_predictor.predict(sentence=str(sent))
    # for verb_dict in results_srl["verbs"]:
    #     if 'ARG' in verb_dict['description']:
    #         return verb_dict
    res = srl_predictor.predict(sentence=str(sent))
    print(res)
    min_O = float('inf')
    min_id = 0
    if len(res["verbs"]) == 0:
        print('No verbs, need debug')
        return False
    # if len(res["verbs"]) == 1:
    #     verb_dict = res["verbs"][0]
    #     if 'ARG' in verb_dict['description']:
    #         return verb_dict
    for idx, verb_dict in enumerate(res["verbs"]):
        # print(verb_dict["tags"])
        # Count how many tokens fall outside this frame's arguments.
        num_O = collections.Counter(verb_dict["tags"])['O']
        if num_O < min_O:
            min_id = idx
            # print(res["verbs"][min_id])
        min_O = min(min_O,num_O)
        # print('min_O:',min_O, 'min_id:',min_id)
    verb_dict = res["verbs"][min_id]
    print(verb_dict['description'])
    if 'ARG' in verb_dict['description']:
        return verb_dict
    else:
        print('dead loop')
# Deal with sent with no verb.
# TODO: we might need a hashtable to store recurring verbs so that the space don't blow up.
# define global variable predv, arg0v, arg1v, arg2v
# global predv, arg0v, arg1v, arg2v = None, None, None, None
# Module-level slots holding the vertices of the most recently processed
# sentence; a fully quoted sentence reuses the previous predv/arg0v
# (see build_path).
predv, arg0v, arg1v, arg2v = None, None, None, None
def build_path(sent):
    """Run SRL over one sentence and build its [PRED, ARG0, ARG1, ARG2] path.

    A Vertex is created for the predicate and for each argument that is
    present; missing slots stay None.  A fully quoted sentence reuses the
    predicate/ARG0 of the previous sentence (module-level globals) and
    treats the quoted text as its ARG1.  Every argument vertex receives a
    back reference to the whole path and its word-index encoding.

    NOTE: this replaces the unresolved git merge conflict that was left in
    the original source (<<<<<<< HEAD ... >>>>>>> master), which made the
    file a SyntaxError; the HEAD side (clear all slots when SRL yields no
    usable frame) is the one that makes sense in the no-verb branch.
    """
    global predv, arg0v, arg1v, arg2v
    quotation = re.search(r"^(\")(.*)(\")$", str(sent))
    if quotation:
        # Reuse predv/arg0v from the last sentence; the quoted text itself
        # becomes ARG1.  group(2) is the text between the quotes (the
        # original used group(1), which is just the opening quote mark).
        arg1v = Vertex(quotation.group(2), 'ARG1')
        arg2v = None
    else:
        srl = get_srl(sent)
        if srl:
            desc = srl['description']

            def _span(tag_pattern):
                # Extract the bracketed argument text for one tag, or None.
                m = re.search(tag_pattern, desc)
                return m[2] if m else None

            arg0 = _span(r"(\[ARG0: )(.*?)(\])")
            arg1 = _span(r"(\[ARG1: )(.*?)(\])")
            arg2 = _span(r"(\[ARG2: )(.*?)(\])")
            # Modifier (ARGM-DIS / ARGM-TMP / ARGM-ADV, ...): extracted
            # only for debugging; it never becomes a vertex.
            mod = _span(r"(\[ARGM-.*?: )(.*?)(\])")
            if mod:
                print('mod', mod)

            predv = Vertex(srl['verb'], 'PRED')
            util.vertex2wordidx(predv, word_dict, True)
            # Missing arguments leave their slot as None.
            arg0v = Vertex(arg0, 'ARG0') if arg0 else None
            arg1v = Vertex(arg1, 'ARG1') if arg1 else None
            arg2v = Vertex(arg2, 'ARG2') if arg2 else None
        else:
            # No usable verb frame: clear every slot.
            predv, arg0v, arg1v, arg2v = None, None, None, None

    res = [predv, arg0v, arg1v, arg2v]
    # Give each argument vertex a back reference to the whole path and
    # compute its word-index encoding.
    for v in (arg0v, arg1v, arg2v):
        if v:
            v.path = res
            util.vertex2wordidx(v, word_dict, True)
    return res
def cluster_index2name(ls_2d, results_coref):
    """Map coreference cluster spans to their surface strings.

    ls_2d: list of inclusive [start, end] token spans, e.g. [[34, 41], [95, 98]]
    results_coref: AllenNLP coref output; only its 'document' token list
        is consulted.
    Returns the corresponding phrases, e.g.
        ["North Korea 's leader Kim Jong - un", 'Kim Jong - un']
    """
    tokens = results_coref['document']
    # end index is inclusive, hence the +1 in the slice.
    return [" ".join(tokens[start:end + 1]) for start, end in ls_2d]
def args2index(arg, sent):
    """Locate `arg`'s token span inside `sent` and record it on the vertex.

    arg: a Vertex whose .val holds the argument phrase, or None.
    sent: an iterable of tokens exposing .text (e.g. a spaCy span).

    On success sets arg.arg_range = [start, end] (inclusive token indices;
    the match is made on the first and last word of the phrase only, as in
    the original) and returns True.  Returns False when arg is None or the
    phrase is not found.
    """
    if not arg:
        # No argument vertex for this slot.
        return False
    v = arg
    words = arg.val.split()
    tokens = [token.text for token in sent]
    # BUG FIX: the original had `else: return False` inside the loop, which
    # aborted the scan at the first non-matching start position, so any
    # phrase not beginning at token 0 was never found.  Scan every
    # candidate start position instead.
    for i in range(len(tokens) - len(words) + 1):
        if tokens[i] == words[0] and tokens[i + len(words) - 1] == words[-1]:
            v.arg_range = [i, i + len(words) - 1]
            return True
    return False
def build_list(doc):
    """Collect the distinct predicate/argument vertices over the document.

    Returns (list of PRED vertices, set of ARG0, set of ARG1, set of ARG2).
    NOTE(review): when build_path finds no verb, doc_path[0] is None and
    None ends up in pred_ls -- confirm downstream consumers tolerate it.
    """
    doc = nlp(doc)
    # pred_ls = []
    # arg0_ls = []
    # arg1_ls = []
    # arg2_ls = []
    # Sets deduplicate vertices across sentences.
    pred_ls, arg0_ls, arg1_ls, arg2_ls = set(),set(),set(),set()
    for sent in doc.sents:
        doc_path = build_path(sent)
        # build pred vertex list
        predv = doc_path[0]
        # if predv.val not in pred_ls:
        #     pred_ls.append(predv)
        pred_ls.add(predv)
        for v in doc_path[1:]:
            if v:
                # check None vertex
                if v.type == 'ARG0':
                    arg0_ls.add(v)
                # if v not in arg0_ls:
                #     arg0_ls.append(v)
                elif v.type == 'ARG1':
                    arg1_ls.add(v)
                # if v not in arg1_ls:
                #     arg1_ls.append(v)
                elif v.type == 'ARG2':
                    arg2_ls.add(v)
                # if v not in arg2_ls:
                #     arg2_ls.append(v)
                else:
                    print('need debug')
    # print(pred_ls)
    # return list(pred_ls), list(arg0_ls), list(arg1_ls), list(arg2_ls)
    return list(pred_ls), arg0_ls, arg1_ls, arg2_ls
def hash_vertex(doc):
    '''
    return mappings from vertices to their index range
    '''
    # Builds a list of (vertex, [start, end]) pairs in document order;
    # token offsets are made document-global by adding the running
    # sentence offset `pos`.  args2index mutates v.arg_range in place.
    doc = nlp(doc)
    vertice_map = []
    pos = 0
    for sent in doc.sents:
        # for each sentence, get the 3 vertices
        doc_path = build_path(sent)
        # modified Aug 2
        # add the predicate vertex to the graph
        # graph.add_vertex(doc_path[0])
        for v in doc_path[1:]:
            if args2index(v,sent):
                # Shift sentence-local indices to document-level indices.
                v.arg_range = [v.arg_range[0]+pos,v.arg_range[1]+pos]
                vertice_map.append((v, v.arg_range))
                # vertice_map.append((v, [v.arg_range[0]+pos,v.arg_range[1]+pos]))
        pos = pos + len(sent)
    return vertice_map
def get_coref(doc):
    '''
    input: the document
    return: clusters_names: ["North Korea 's leader Kim Jong - un", 'Kim Jong - un']
            clusters_indexs: [[34, 41], [95, 98]]
    '''
    # Runs the AllenNLP coreference model and pairs each cluster's token
    # spans with their surface strings (parallel lists).
    results_coref = coref_predictor.predict(document=doc)
    # for debugging
    # print(results_coref)
    clusters_names = []
    clusters_indexs = []
    for index, cluster in enumerate(results_coref["clusters"]):
        name = cluster_index2name(cluster,results_coref)
        clusters_names.append(name)
        clusters_indexs.append(cluster)
        ## for debugging
        print(index, cluster,'\n')
        print(index, name,'\n')
    # clusters_ = zip(clusters_names,clusters_indexs)
    # return clusters_
    return clusters_names, clusters_indexs
def build_unique_list(doc, vert_ls, clusters_names, clusters_indexs):
    '''
    build unique list of arg0, arg1 and arg2
    return 2d list, while rows with different length
    for ex, if some arg0s in one group, they are added into the same row

    Coreferent argument vertices share a row; every remaining
    (non-coreferent) vertex becomes its own singleton row afterwards.
    '''
    # global vert_ls
    # global clusters_
    # vert_ls = hash_vertex(doc)
    pred_ls, arg0_ls, arg1_ls, arg2_ls = build_list(doc)
    # clusters_ = get_coref(doc)
    # clusters_names, clusters_indexs = get_coref(doc)
    arg0_2d_ls = []
    arg1_2d_ls = []
    arg2_2d_ls = []
    # for clusters_name,clusters_index in clusters_:
    for clusters_name,clusters_index in zip(clusters_names, clusters_indexs):
        group0_ls = []
        group1_ls = []
        group2_ls = []
        for single_range in clusters_index:
            vertex = bin_search(single_range, vert_ls)
            if vertex:
                # Move the vertex from the remaining pool into its group.
                if vertex.type == 'ARG0':
                    group0_ls += [vertex]
                    # arg0_ls.remove(vertex)
                    arg0_ls.discard(vertex)
                elif vertex.type == 'ARG1':
                    group1_ls += [vertex]
                    # arg1_ls.remove(vertex)
                    arg1_ls.discard(vertex)
                elif vertex.type == 'ARG2':
                    group2_ls += [vertex]
                    # arg2_ls.remove(vertex)
                    arg2_ls.discard(vertex)
                else:
                    print('need debug')
                    break
        if group0_ls:
            arg0_2d_ls.append(group0_ls)
        if group1_ls:
            arg1_2d_ls.append(group1_ls)
        if group2_ls:
            arg2_2d_ls.append(group2_ls)
    # add non-coref
    # The bug is that I only add coref-args into the unique list, but not the non-coref part like Vertex('Pyongyang')
    # The bug comes again with []. Need to cope with non-coref part
    arg0_ls, arg1_ls, arg2_ls = list(arg0_ls), list(arg1_ls), list(arg2_ls)
    for noncoref in arg0_ls:
        # print(noncoref,'debug')
        # util.vertex2wordidx(noncoref, word_dict, True)
        # print('noncoref 0:',noncoref.word_idx)
        arg0_2d_ls.append([noncoref])
    for noncoref in arg1_ls:
        # util.vertex2wordidx(noncoref, word_dict, True)
        arg1_2d_ls.append([noncoref])
    for noncoref in arg2_ls:
        # util.vertex2wordidx(noncoref, word_dict, True)
        arg2_2d_ls.append([noncoref])
    print('Unique_ls_arg0:','\n', arg0_2d_ls)
    print('Unique_ls_arg1:','\n', arg1_2d_ls)
    print('Unique_ls_arg2:','\n', arg2_2d_ls)
    print('Unique_ls_pred:','\n', pred_ls)
    return pred_ls, arg0_2d_ls, arg1_2d_ls, arg2_2d_ls
def args2dls2dic(ls_2d):
    '''
    Flatten grouped argument vertices into a hash dict.

    Every element of row i in ls_2d (e.g. arg0_2d_ls) maps to its group
    index i.
    '''
    return {member: row_idx
            for row_idx, row in enumerate(ls_2d)
            for member in row}
def build_args_dicts(doc, vert_ls, clusters_names, clusters_indexs):
    '''
    Build per-argument-type group-index lookups from the unique lists and
    return them together with the lists themselves.
    '''
    (pred_ls, arg0_groups, arg1_groups,
     arg2_groups) = build_unique_list(doc, vert_ls,
                                      clusters_names, clusters_indexs)
    lookups = [args2dls2dic(groups)
               for groups in (arg0_groups, arg1_groups, arg2_groups)]
    return (lookups[0], lookups[1], lookups[2],
            pred_ls, arg0_groups, arg1_groups, arg2_groups)
def vertex2idx(vertex, pred_ls, arg0_dict, arg1_dict, arg2_dict):
    '''
    Return the index of `vertex` within its type's lookup, offset by 2.

    Indices 0 and 1 are reserved (0 = missing argument, 1 = unknown).
    Prints a debug message (and returns None) for unrecognized types.
    '''
    kind = vertex.type
    if kind == 'PRED':
        return pred_ls.index(vertex) + 2
    lookup = {'ARG0': arg0_dict,
              'ARG1': arg1_dict,
              'ARG2': arg2_dict}.get(kind)
    if lookup is not None:
        return lookup[vertex] + 2
    print('need debug')
def path2idx(path, pred_ls, arg0_dict, arg1_dict, arg2_dict):
    '''
    Encode a [PRED, ARG0, ARG1, ARG2] vertex path as a list of indices.

    A missing (None) slot is encoded as 0; every present vertex is looked
    up via vertex2idx.
    '''
    return [vertex2idx(v, pred_ls, arg0_dict, arg1_dict, arg2_dict) if v else 0
            for v in path]
def collapse(fix_range, arg_range, coref_range):
    '''
    Splice the representative span into an argument's token-index list.

    The slice of arg_range running from the first occurrence of
    coref_range[0] up to the last occurrence of coref_range[1] is replaced
    by the consecutive indices fix_range[0]..fix_range[-1].

    Example:
        collapse([9, 10], [38, 39, 40, 41, 42], [40, 41])
        -> [38, 39, 9, 10, 42]

    When the coref span covers the whole argument the result is exactly
    the fix_range expansion (e.g. collapse([8, 16], [57], [57, 57])
    -> [8, 9, ..., 16]).
    '''
    # First position whose token index equals the coref start (0 if absent).
    cut_start = 0
    for pos, tok_idx in enumerate(arg_range):
        if tok_idx == coref_range[0]:
            cut_start = pos
            break
    # One past the last position equal to the coref end (0 if absent).
    cut_end = 0
    for pos in range(len(arg_range) - 1, -1, -1):
        if arg_range[pos] == coref_range[1]:
            cut_end = pos + 1
            break
    replacement = list(range(fix_range[0], fix_range[-1] + 1))
    return arg_range[:cut_start] + replacement + arg_range[cut_end:]
def collapse_coref(doc, clusters_names, clusters_indexs, vert_ls):
    '''
    update self.arg_range by coref

    For every cluster with more than one mention, rewrite each non-first
    mention's containing vertex: its token list (arg_range1by1) has the
    mention span replaced by the cluster's first (representative) span,
    its surface string (arg_str) is rebuilt from the document tokens, and
    its word-index encoding is refreshed.
    '''
    # vert_ls = hash_vertex(doc)
    # clusters_ = get_coref(doc)
    doc = nlp(doc)
    for clusters_name,clusters_index in zip(clusters_names, clusters_indexs):
        if len(clusters_name) > 1:
            # The first mention's span is the canonical representative.
            fix_range = clusters_index[0]
            # print('fix_range:', fix_range)
            for item, coref_range in zip(clusters_name[1:], clusters_index[1:]):
                # print(item, coref_range)
                vertex = bin_search(coref_range, vert_ls)
                if vertex:
                    # update vertex.arg_range1by1
                    print('vertex ',vertex)
                    left, right = vertex.arg_range[0], vertex.arg_range[1]
                    # print(left, right)
                    vertex.arg_range1by1 = collapse(fix_range, list(range(left, right+1)), coref_range)
                    print('vertex.arg_range1by1:', vertex.arg_range1by1)
                    tmp = []
                    for idx in vertex.arg_range1by1:
                        tmp.append(str(doc[idx]))
                    vertex.arg_str = " ".join(tmp)
                    util.vertex2wordidx(vertex, word_dict, False)
                    print('-------vertex.word_idx:', vertex.word_idx)
    print('collapse completed!')
def build_event_chain(doc, pred_ls, arg0_dict, arg1_dict, arg2_dict, vert_ls, clusters_names, clusters_indexs):
    '''
    build event chain based on coref
    return 2d event chain [[vertex.path]] while vertex.path is a event path(1d list)

    Each coref cluster contributes one chain per argument type; a chain
    entry is the word-index encoding of the vertex's [PRED, ARG0, ARG1,
    ARG2] path ([0] standing in for a missing slot).  collapse_coref is
    run first so the encodings reflect the representative mentions.
    '''
    # vert_ls = hash_vertex(doc)
    # clusters_names, clusters_indexs = get_coref(doc)
    # Aug 13th
    collapse_coref(doc, clusters_names, clusters_indexs, vert_ls)
    echain_2d_ls_arg0 = []
    echain_2d_ls_arg1 = []
    echain_2d_ls_arg2 = []
    # pdb.set_trace()
    for clusters_name, clusters_index in zip(clusters_names, clusters_indexs):
        echain_ls_arg0 = []
        echain_ls_arg1 = []
        echain_ls_arg2 = []
        # echain_ls_arg0 = set()
        # echain_ls_arg1 = set()
        # echain_ls_arg2 = set()
        for single_range in clusters_index:
            vertex = bin_search(single_range, vert_ls)
            if vertex:
                print('vertex:', vertex.val)
                print('vertex.word_idx:', vertex.word_idx)
                if vertex.type == 'ARG0':
                    # vertex.idx_path = path2idx(vertex.path, pred_ls, arg0_dict, arg1_dict, arg2_dict)
                    # Encode the whole event path; [0] marks a missing slot.
                    word_idx_path = []
                    for arg in vertex.path:
                        if arg:
                            word_idx_path.append(arg.word_idx)
                        else:
                            word_idx_path.append([0])
                    # echain_ls_arg0.append(vertex.idx_path)
                    # Aug 13
                    echain_ls_arg0.append(word_idx_path)
                elif vertex.type == 'ARG1':
                    # vertex.idx_path = path2idx(vertex.path, pred_ls, arg0_dict, arg1_dict, arg2_dict)
                    word_idx_path = []
                    for arg in vertex.path:
                        if arg:
                            word_idx_path.append(arg.word_idx)
                        else:
                            word_idx_path.append([0])
                    # echain_ls_arg1.append(vertex.idx_path)
                    # echain_ls_arg1.append(vertex.word_idx)
                    echain_ls_arg1.append(word_idx_path)
                elif vertex.type == 'ARG2':
                    # vertex.idx_path = path2idx(vertex.path, pred_ls, arg0_dict, arg1_dict, arg2_dict)
                    # echain_ls_arg2.append(vertex.idx_path)
                    word_idx_path = []
                    for arg in vertex.path:
                        if arg:
                            word_idx_path.append(arg.word_idx)
                        else:
                            word_idx_path.append([0])
                    echain_ls_arg2.append(word_idx_path)
                    # echain_ls_arg2.append(vertex.word_idx)
                else:
                    print('need debug')
                    break
    # echain_ls_arg0 = list(echain_ls_arg0)
    # echain_ls_arg1 = list(echain_ls_arg1)
    # echain_ls_arg2 = list(echain_ls_arg2)
        # Only non-empty chains are kept.
        if echain_ls_arg0:
            echain_2d_ls_arg0.append(echain_ls_arg0)
        if echain_ls_arg1:
            echain_2d_ls_arg1.append(echain_ls_arg1)
        if echain_ls_arg2:
            echain_2d_ls_arg2.append(echain_ls_arg2)
    print('echain_2d_ls_arg0:','\n', echain_2d_ls_arg0)
    print('echain_2d_ls_arg1:','\n', echain_2d_ls_arg1)
    print('echain_2d_ls_arg2:','\n', echain_2d_ls_arg2)
    return echain_2d_ls_arg0, echain_2d_ls_arg1, echain_2d_ls_arg2
def build_graph(doc):
    """Link coreferent vertices with 'COREF' edges.

    Each cluster's non-first vertices get a reverse edge pointing at the
    cluster's first vertex (only the reverse direction is added; the
    forward edge is commented out below).
    """
    # graph = Graph()
    # vert_ls = hash_vertex(doc, graph)
    vert_ls = hash_vertex(doc)
    clusters_names, clusters_indexs = get_coref(doc)
    # transform the clusters from list of list of indices to list of list of names
    coref_2d_ls = []
    for clusters_name,clusters_index in zip(clusters_names, clusters_indexs):
        coref_ls = []
        for single_range in clusters_index:
            # Binary search
            # add the resulting vertex to the list
            vertex = bin_search(single_range, vert_ls)
            # check : the coreference should be a vertex in the list
            # assert vertex is not None, "vertex not found"
            if vertex:
                coref_ls.append(vertex)
        coref_2d_ls.append(coref_ls)
    # print('coref_2d_ls:','\n', coref_2d_ls)
    # build 'coref' edges
    for coref_ls in coref_2d_ls:
        if len(coref_ls) > 1:
            for v in coref_ls[1:]:
                # coref_e = Edge(coref_ls[0],v, 'COREF')
                # coref_ls[0].add_edge(coref_e)
                # modified Aug 2nd
                # add reverse edge
                rev_coref_e = Edge(v, coref_ls[0], 'COREF')
                v.add_edge(rev_coref_e)
def rangeInRange(small, big):
    '''
    True when the inclusive span `small` lies entirely inside `big`.

    small: the coreference span, big: the argument span.
    '''
    lo, hi = small
    return big[0] <= lo and hi <= big[1]
# TODO: test binary search
# TODO: maybe if the coref is in one vertex, we should not just think the entire thing as a coreferenced entity
# for example, "that he is happy" should not have the same embedding as the entity 'he' is referring to
# rather, "he" should have the same embedding
def bin_search(single_range, vert_ls):
    """Binary-search for the vertex whose token span contains single_range.

    vert_ls is sorted (vertex, [start, end]) pairs in document order.
    Returns the containing vertex, or None when the query only partially
    overlaps an entry or falls between entries.
    """
    lo, hi = 0, len(vert_ls) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        vertex, span = vert_ls[mid]
        if rangeInRange(single_range, span):
            return vertex
        if single_range[0] > span[1]:
            # Query lies entirely to the right of this span.
            lo = mid + 1
        elif single_range[1] < span[0]:
            # Query lies entirely to the left of this span.
            hi = mid - 1
        else:
            # Partial overlap: no entry can contain the query.
            return None
    return None
def build_event_chain_sample(doc):
    """End-to-end driver: vertex map -> coref -> arg dicts -> event chains.

    Pickles the resulting [len(pred), len(arg0), len(arg1), len(arg2)]
    list to ../datasets/lenls and the concatenated chains to
    ../datasets/echainls, then returns both.
    """
    vert_ls = hash_vertex(doc)
    clusters_names, clusters_indexs = get_coref(doc)
    # pred_ls, arg0_ls, arg1_ls, arg2_ls = build_list(doc)
    # print('arg0_ls:', arg0_ls)
    # print('arg1_ls:', arg1_ls)
    # print('arg2_ls:', arg2_ls)
    '''
    arg0_ls: [Vertex('Pyongyang'), Vertex('President Trump'), Vertex('US officials'), Vertex('the two leaders'), Vertex('President Donald Trump'), Vertex('North Korea'), Vertex('by US - based monitoring group 38 North')]
    '''
    # pred_ls, arg0_2d_ls, arg1_2d_ls, arg2_2d_ls = build_unique_list(doc)
    # len_ls = [len(pred_ls), len(arg0_2d_ls), len(arg1_2d_ls), len(arg2_2d_ls)]
    # print('pred_ls:', pred_ls, len(pred_ls))
    # print('arg0_2d_ls:', arg0_2d_ls, len(arg0_2d_ls))
    # print('arg1_2d_ls:', arg1_2d_ls, len(arg1_2d_ls))
    # print('arg2_2d_ls:', arg2_2d_ls, len(arg2_2d_ls))
    arg0_dict, arg1_dict, arg2_dict, pred_ls, arg0_2d_ls, arg1_2d_ls, arg2_2d_ls = build_args_dicts(doc, vert_ls, clusters_names, clusters_indexs)
    len_ls = [len(pred_ls), len(arg0_2d_ls), len(arg1_2d_ls), len(arg2_2d_ls)]
    '''
    pred: [Vertex('sign'), Vertex('maintain'), Vertex('suspect'), Vertex('see'), Vertex('criticise'), Vertex('carry'), Vertex('say')]
    arg0_dict: {Vertex('President Donald Trump'): 0, Vertex('President Trump'): 0, Vertex('North Korea'): 1, Vertex('Pyongyang'): 2, Vertex('US officials'): 3, Vertex('by US - based monitoring group 38 North'): 4, Vertex('the two leaders'): 5}
    arg1_dict: {Vertex('that Sohae is a satellite launch site'): 0, Vertex('that it has been used to test ballistic missiles'): 0, Vertex('that he was " very happy " with the progress in relations with North Korea'): 4, Vertex('North Korea 's leader Kim Jong - un had vowed to destroy an engine test site'): 4, Vertex('a deal to work towards the " complete denuclearisation of the Korean Peninsula "'): 3, Vertex('the deal'): 3, Vertex('a total of six nuclear tests , the most recent of which took place in September last year'): 5, Vertex('the Sohae station'): 6}
    arg2_dict: {Vertex('for a lack of details on when or how Pyongyang would renounce its nuclear weapons'): 0}
    '''
    # print('arg0_dict:', arg0_dict)
    # print('arg1_dict:', arg1_dict)
    # print('arg2_dict:', arg2_dict)
    echain_2d_ls_arg0, echain_2d_ls_arg1, echain_2d_ls_arg2 = build_event_chain(doc, pred_ls, arg0_dict, arg1_dict, arg2_dict, vert_ls, clusters_names, clusters_indexs)
    echain_ls = echain_2d_ls_arg0 + echain_2d_ls_arg1 + echain_2d_ls_arg2
    print('****** len_ls:',len_ls)
    print('****** echain_ls:',echain_ls)
    with open('../datasets/lenls', 'wb') as fid:
        dump(len_ls, fid)
    with open('../datasets/echainls', 'wb') as fid:
        dump(echain_ls,fid)
    return len_ls, echain_ls
# Script entry point: load the corpus and build/pickle the event chains.
if __name__ == '__main__':
    args = ap.get_args()
    # Aug 12th modifying, substitution
    doc = load_data(args)
    build_event_chain_sample(doc)
| [
"fei8@purdue.edu"
] | fei8@purdue.edu |
5e61a9a88bbf2e849ca0a0168b0cd0c2b553b454 | 168ff01513ea134c7952db45259d24d5b8b78df6 | /virtual/lib/python3.6/site-packages/nose/plugins/attrib.py | 8ac9239d1c574b543faa0a0b623de362b1a3c6fc | [
"MIT"
] | permissive | OKC254/flask-blog | e06fc6b75182a766ef9895a77426dbd229cb6756 | 78dc43f6ba981822f17026b071db6aaf4680daad | refs/heads/master | 2022-10-08T13:30:39.448278 | 2019-06-03T07:15:47 | 2019-06-03T07:15:47 | 189,934,226 | 0 | 0 | MIT | 2022-09-16T18:02:05 | 2019-06-03T04:28:18 | Python | UTF-8 | Python | false | false | 9,666 | py | """Attribute selector plugin.
Oftentimes when testing you will want to select tests based on
criteria rather then simply by filename. For example, you might want
to run all tests except for the slow ones. You can do this with the
Attribute selector plugin by setting attributes on your test methods.
Here is an example:
.. code-block:: python
def test_big_download():
import urllib
# commence slowness...
test_big_download.slow = 1
Once you've assigned an attribute ``slow = 1`` you can exclude that
test and all other tests having the slow attribute by running ::
$ nosetests -a '!slow'
There is also a decorator available for you that will set attributes.
Here's how to set ``slow=1`` like above with the decorator:
.. code-block:: python
from nose.plugins.attrib import attr
@attr('slow')
def test_big_download():
import urllib
# commence slowness...
And here's how to set an attribute with a specific value:
.. code-block:: python
from nose.plugins.attrib import attr
@attr(speed='slow')
def test_big_download():
import urllib
# commence slowness...
This test could be run with ::
$ nosetests -a speed=slow
In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
on all its test methods at once. For example:
.. code-block:: python
from nose.plugins.attrib import attr
@attr(speed='slow')
class MyTestCase:
def test_long_integration(self):
pass
def test_end_to_end_something(self):
pass
Below is a reference to the different syntaxes available.
Simple syntax
-------------
Examples of using the ``-a`` and ``--attr`` options:
* ``nosetests -a status=stable``
Only runs tests with attribute "status" having value "stable"
* ``nosetests -a priority=2,status=stable``
Runs tests having both attributes and values
* ``nosetests -a priority=2 -a slow``
Runs tests that match either attribute
* ``nosetests -a tags=http``
If a test's ``tags`` attribute was a list and it contained the value
``http`` then it would be run
* ``nosetests -a slow``
Runs tests with the attribute ``slow`` if its value does not equal False
(False, [], "", etc...)
* ``nosetests -a '!slow'``
Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
attribute that is equal to False
**NOTE**:
if your shell (like bash) interprets '!' as a special character make sure to
put single quotes around it.
Expression Evaluation
---------------------
Examples using the ``-A`` and ``--eval-attr`` options:
* ``nosetests -A "not slow"``
Evaluates the Python expression "not slow" and runs the test if True
* ``nosetests -A "(priority > 5) and not slow"``
Evaluates a complex Python expression and runs the test if True
"""
import inspect
import logging
import os
import sys
from inspect import isfunction
from nose.plugins.base import Plugin
from nose.util import tolist
log = logging.getLogger('nose.plugins.attrib')
compat_24 = sys.version_info >= (2, 4)
def attr(*args, **kwargs):
    """Decorator that adds attributes to classes or functions
    for use with the Attribute (-a) plugin.

    Positional names become flag attributes set to ``True``; keyword
    arguments are copied onto the target verbatim.
    """
    def decorate(target):
        # Bare names act as boolean flags.
        for flag in args:
            setattr(target, flag, True)
        # name=value pairs are stored as-is.
        for key, value in kwargs.items():
            setattr(target, key, value)
        return target
    return decorate
def get_method_attr(method, cls, attr_name, default = False):
    """Look up an attribute on a method/function, falling back to its class.

    Returns *default* when neither the method nor the class (if given)
    defines the attribute.
    """
    _sentinel = object()
    # Try the function object first; only consult the class if absent there.
    found = getattr(method, attr_name, _sentinel)
    if found is _sentinel and cls is not None:
        found = getattr(cls, attr_name, _sentinel)
    return default if found is _sentinel else found
class ContextHelper:
    """Adapter that lets ``eval`` resolve names as method/class attributes.

    Used as the locals mapping for eval'd ``-A`` expressions: every name
    lookup becomes an attribute lookup on the wrapped method (falling back
    to its class).
    """
    def __init__(self, method, cls):
        self.method, self.cls = method, cls

    def __getitem__(self, name):
        # Delegate to the shared attribute-resolution helper.
        return get_method_attr(self.method, self.cls, name)
class AttributeSelector(Plugin):
    """Selects test cases to be run based on their attributes.

    Parses the ``-a/--attr`` and ``-A/--eval-attr`` options into
    ``self.attribs`` (a list of attribute groups) and then accepts or
    rejects functions/methods in ``wantFunction``/``wantMethod`` by
    matching their attributes against those groups.
    """
    def __init__(self):
        Plugin.__init__(self)
        # List of attribute groups; each group is a list of (key, value)
        # tuples that must all match for the group to match.
        self.attribs = []

    def options(self, parser, env):
        """Register command line options"""
        parser.add_option("-a", "--attr",
                          dest="attr", action="append",
                          default=env.get('NOSE_ATTR'),
                          metavar="ATTR",
                          help="Run only tests that have attributes "
                          "specified by ATTR [NOSE_ATTR]")
        # disable in < 2.4: eval can't take needed args
        if compat_24:
            parser.add_option("-A", "--eval-attr",
                              dest="eval_attr", metavar="EXPR", action="append",
                              default=env.get('NOSE_EVAL_ATTR'),
                              help="Run only tests for whose attributes "
                              "the Python expression EXPR evaluates "
                              "to True [NOSE_EVAL_ATTR]")

    def configure(self, options, config):
        """Configure the plugin and system, based on selected options.

        attr and eval_attr may each be lists.

        self.attribs will be a list of lists of tuples. In that list, each
        list is a group of attributes, all of which must match for the rule to
        match.
        """
        self.attribs = []
        # handle python eval-expression parameter
        if compat_24 and options.eval_attr:
            eval_attr = tolist(options.eval_attr)
            for attr in eval_attr:
                # "<python expression>"
                # -> eval(expr) in attribute context must be True
                # The expression string itself is stored as the group key;
                # validateAttrib passes it back as the first argument.
                def eval_in_context(expr, obj, cls):
                    return eval(expr, None, ContextHelper(obj, cls))
                self.attribs.append([(attr, eval_in_context)])
        # attribute requirements are a comma separated list of
        # 'key=value' pairs
        if options.attr:
            std_attr = tolist(options.attr)
            for attr in std_attr:
                # all attributes within an attribute group must match
                attr_group = []
                for attrib in attr.strip().split(","):
                    # don't die on trailing comma
                    if not attrib:
                        continue
                    items = attrib.split("=", 1)
                    if len(items) > 1:
                        # "name=value"
                        # -> 'str(obj.name) == value' must be True
                        key, value = items
                    else:
                        key = items[0]
                        if key[0] == "!":
                            # "!name"
                            # 'bool(obj.name)' must be False
                            key = key[1:]
                            value = False
                        else:
                            # "name"
                            # -> 'bool(obj.name)' must be True
                            value = True
                    attr_group.append((key, value))
                self.attribs.append(attr_group)
        # Any parsed rule enables the plugin without requiring --with-attrib.
        if self.attribs:
            self.enabled = True

    def validateAttrib(self, method, cls = None):
        """Verify whether a method has the required attributes.

        The method is considered a match if it matches all attributes
        for any attribute group.

        Returns None (acceptable, but not forced) on a match and False
        (rejected) otherwise.
        """
        # TODO: is there a need for case-sensitive value comparison?
        # NOTE(review): local name `any` shadows the builtin within this
        # method; it accumulates whether any group matched.
        any = False
        for group in self.attribs:
            match = True
            for key, value in group:
                attr = get_method_attr(method, cls, key)
                if callable(value):
                    # Eval-expression rule: value is eval_in_context and
                    # key is the expression string.
                    if not value(key, method, cls):
                        match = False
                        break
                elif value is True:
                    # value must exist and be True
                    if not bool(attr):
                        match = False
                        break
                elif value is False:
                    # value must not exist or be False
                    if bool(attr):
                        match = False
                        break
                elif type(attr) in (list, tuple):
                    # value must be found in the list attribute
                    # (case-insensitive string comparison)
                    if not str(value).lower() in [str(x).lower()
                                                  for x in attr]:
                        match = False
                        break
                else:
                    # value must match, convert to string and compare
                    if (value != attr
                        and str(value).lower() != str(attr).lower()):
                        match = False
                        break
            any = any or match
        if any:
            # not True because we don't want to FORCE the selection of the
            # item, only say that it is acceptable
            return None
        return False

    def wantFunction(self, function):
        """Accept the function if its attributes match.
        """
        return self.validateAttrib(function)

    def wantMethod(self, method):
        """Accept the method if its attributes match.
        """
        try:
            # Unbound/plain functions have no __self__; reject them here and
            # let wantFunction handle that case.
            cls = method.__self__.__class__
        except AttributeError:
            return False
        return self.validateAttrib(method, cls)
| [
"oscarkiplimo@gmail.com"
] | oscarkiplimo@gmail.com |
ffbc2549ab313f9a12e700c6a5e08d7cd3342fc4 | fa380310206f7e0c015be610dd9f74f7ba62e8f9 | /day2/part2.py | e358436c06caa6a400dee3ff4084cecc5d3f9b12 | [
"MIT"
] | permissive | sharkbound/advent_of_code_2016 | 71c666ce6f7e7e816dbb6e76795650ecd9f1cb48 | e655974b2dea422af4ec1debad296ee6c22d690a | refs/heads/master | 2020-12-05T03:12:53.883458 | 2020-01-13T01:24:25 | 2020-01-13T01:24:25 | 231,993,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | import numpy as np
from read import read_lines
# Direction letters mapped to (dx, dy) grid offsets; y grows downward.
move_offsets = {
    'L': (-1, 0),
    'R': (1, 0),
    'U': (0, -1),
    'D': (0, 1),
}

# The diamond keypad from AoC 2016 day 2, part 2. Every row is padded to
# exactly 5 characters so np.array builds a proper (5, 5) character grid
# that can be indexed as keys[y, x]; spaces mark positions with no key.
# (The previous ragged rows -- 3/5/5/5/3 chars -- could not form a 2-D
# char array, breaking the keys[y, x] lookups in is_valid/solve.)
keys = np.array([
    list('  1  '),
    list(' 234 '),
    list('56789'),
    list(' ABC '),
    list('  D  '),
])
def is_valid(x, y):
    """Return True if (x, y) lies on the 5x5 grid and maps to a real key."""
    inside_grid = 0 <= x < 5 and 0 <= y < 5
    return inside_grid and keys[y, x] != ' '
def solve(lines):
    """Follow each instruction line across the keypad and print the code."""
    code = []
    x, y = 0, 2  # start on the '5' key
    for line in lines:
        for move in line:
            dx, dy = move_offsets[move]
            # Each axis moves independently, and only if the step stays
            # on an actual key.
            if is_valid(x + dx, y):
                x += dx
            if is_valid(x, y + dy):
                y += dy
        # The key under the cursor at the end of each line is one digit.
        code.append(keys[y, x])
    print(''.join(code))
def main():
    """Entry point: read the puzzle input and print the bathroom code."""
    solve(read_lines())


if __name__ == '__main__':
    main()
| [
"laptopblaster@gmail.com"
] | laptopblaster@gmail.com |
7d1fab8330262b4f73ac33d7b013f395a6508bee | 3312b5066954cbf96c79ef3e1f3d582b31ebc5ae | /colegend/academy/models.py | 54bd2e62cf15f774ccb6431e331552ad3aa95654 | [] | no_license | Eraldo/colegend | d3f3c2c37f3bade7a3a1e10d307d49db225fe7f5 | 2e7b9d27887d7663b8d0d1930c2397c98e9fa1fc | refs/heads/master | 2021-01-16T23:32:09.245967 | 2020-10-07T12:12:14 | 2020-10-07T12:12:14 | 21,119,074 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Avg, Q
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.shortcuts import redirect
from wagtail.core.models import Page
from colegend.core.fields import MarkdownField
from colegend.core.models import TimeStampedBase, OwnedBase
from django.utils.translation import ugettext_lazy as _
class BookTag(models.Model):
    """A django model representing a book's text-tag."""

    # Unique tag label shown to users.
    name = models.CharField(
        _('name'),
        max_length=255,
        unique=True
    )

    class Meta:
        verbose_name = _('Tag')
        verbose_name_plural = _('Tags')
        ordering = ['name']
        # Reverse accessor name used by Book.tags.
        default_related_name = 'tags'

    def __str__(self):
        return self.name
class BookQuerySet(models.QuerySet):
    """QuerySet with convenience filters for books."""

    def search(self, query):
        """Case-insensitively match *query* against name, author or content."""
        matches = (
            Q(name__icontains=query)
            | Q(author__icontains=query)
            | Q(content__icontains=query)
        )
        return self.filter(matches)
class Book(TimeStampedBase):
    """A book in the book club, with a cached community rating.

    At most one book can be `featured` at a time (enforced in save()).
    The `rating` field is a cached average of related BookReview ratings,
    kept in sync by the BookReview post_save/post_delete signal handlers.
    """

    # Unique display title.
    name = models.CharField(
        _('name'),
        max_length=255,
        unique=True
    )
    author = models.CharField(
        _('author'),
        max_length=255,
    )
    # External cover-image location.
    image_url = models.URLField(
        _('image url'),
        max_length=1000,
        blank=True
    )
    # External link for the book itself.
    url = models.URLField(
        _('url'),
        max_length=1000,
        blank=True
    )
    content = MarkdownField(
        blank=True
    )
    # Whether the book is visible to everyone.
    public = models.BooleanField(
        default=False
    )
    # Only one book may be featured; see save() below.
    featured = models.BooleanField(
        default=False
    )
    tags = models.ManyToManyField(
        to=BookTag,
        blank=True,
    )
    notes = models.TextField(
        verbose_name=_("notes"),
        help_text=_("Staff notes."),
        blank=True
    )
    # Cached average review rating (0 when there are no reviews).
    rating = models.FloatField(
        _('rating'),
        default=0
    )

    def calculate_rating(self):
        """Return the average review rating rounded to 2 decimals, or 0."""
        rating = self.book_reviews.aggregate(Avg('rating')).get('rating__avg')
        return round(rating, 2) if rating else 0

    def update_rating(self):
        """Refresh the cached rating field in memory (does not save)."""
        self.rating = self.calculate_rating()

    @property
    def area_ratings(self):
        """Per-area rating averages aggregated over all reviews."""
        return self.book_reviews.aggregate(
            area_1=Avg('area_1'),
            area_2=Avg('area_2'),
            area_3=Avg('area_3'),
            area_4=Avg('area_4'),
            area_5=Avg('area_5'),
            area_6=Avg('area_6'),
            area_7=Avg('area_7'),
        )

    objects = BookQuerySet.as_manager()

    class Meta:
        default_related_name = 'books'
        ordering = ['name']

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Making sure only one book can be featured: un-feature the
        # previously featured book, if any.
        if self.featured:
            try:
                temp = Book.objects.get(featured=True)
                if self != temp:
                    temp.featured = False
                    temp.save()
            except Book.DoesNotExist:
                pass
        return super().save(*args, **kwargs)
class BookReview(OwnedBase, TimeStampedBase):
    """A user's review of a book: overall rating plus per-area scores.

    Each owner may review a given book only once (unique_together).
    Saving or deleting a review triggers `reset_book_rating`, refreshing
    the book's cached average rating.
    """

    book = models.ForeignKey(
        to=Book,
        on_delete=models.CASCADE
    )
    # Overall rating on a 1-5 scale.
    rating = models.PositiveSmallIntegerField(
        _('rating'),
        validators=[MinValueValidator(1), MaxValueValidator(5)]
    )
    # Per-area scores (0-100). What the seven areas represent is defined
    # elsewhere in the project -- TODO confirm.
    area_1 = models.PositiveSmallIntegerField(
        _('area 1'),
        validators=[MaxValueValidator(100)]
    )
    area_2 = models.PositiveSmallIntegerField(
        _('area 2'),
        validators=[MaxValueValidator(100)]
    )
    area_3 = models.PositiveSmallIntegerField(
        _('area 3'),
        validators=[MaxValueValidator(100)]
    )
    area_4 = models.PositiveSmallIntegerField(
        _('area 4'),
        validators=[MaxValueValidator(100)]
    )
    area_5 = models.PositiveSmallIntegerField(
        _('area 5'),
        validators=[MaxValueValidator(100)]
    )
    area_6 = models.PositiveSmallIntegerField(
        _('area 6'),
        validators=[MaxValueValidator(100)]
    )
    area_7 = models.PositiveSmallIntegerField(
        _('area 7'),
        validators=[MaxValueValidator(100)]
    )
    # Free-form review text.
    content = MarkdownField()

    class Meta:
        default_related_name = 'book_reviews'
        unique_together = ['owner', 'book']

    def __str__(self):
        return 'Book review'
@receiver(post_save, sender=BookReview)
@receiver(post_delete, sender=BookReview)
def reset_book_rating(sender, instance, *args, **kwargs):
    """Keep the reviewed book's cached rating in sync with its reviews."""
    book = instance.book
    book.update_rating()
    book.save()
class AcademyPage(Page):
    """Container page for the academy section.

    Never rendered itself: every request is redirected to its first child.
    """

    template = 'academy/base.html'

    def serve(self, request, *args, **kwargs):
        first_child = self.get_first_child()
        return redirect(first_child.url)

    parent_page_types = ['cms.RootPage']
    subpage_types = ['CoursesPage', 'BookClubPage', 'QuizzesPage', 'resources.ResourcesPage']
class CoursesPage(Page):
    """Wagtail page rendering the academy courses overview."""

    template = 'academy/courses.html'

    parent_page_types = ['AcademyPage']
    subpage_types = []

    def get_context(self, request, *args, **kwargs):
        # No extra context beyond what Page already provides.
        return super().get_context(request, *args, **kwargs)

    def __str__(self):
        return self.title
class BookClubPage(Page):
    """Wagtail page rendering the academy book club."""

    template = 'academy/book_club.html'

    parent_page_types = ['AcademyPage']
    subpage_types = []

    def get_context(self, request, *args, **kwargs):
        # No extra context beyond what Page already provides.
        return super().get_context(request, *args, **kwargs)

    def __str__(self):
        return self.title
class QuizzesPage(Page):
    """Wagtail page rendering the academy quizzes."""

    template = 'academy/quizzes.html'

    parent_page_types = ['AcademyPage']
    subpage_types = []

    def get_context(self, request, *args, **kwargs):
        # No extra context beyond what Page already provides.
        return super().get_context(request, *args, **kwargs)

    def __str__(self):
        return self.title
# class ResourcesPage(Page):
# template = 'academy/resources.html'
#
# parent_page_types = ['AcademyPage']
# subpage_types = []
#
# def get_context(self, request, *args, **kwargs):
# context = super().get_context(request, *args, **kwargs)
# return context
#
# def __str__(self):
# return self.title
| [
"eraldo@eraldo.org"
] | eraldo@eraldo.org |
978437ad52f35204f05cb980483e1e9362d2b42e | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /tf3d/losses/classification_losses.py | 1e03f0309003580e6e82678834664cd02cbaca90 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 26,394 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains voxel classification losses."""
import functools
import gin
import gin.tf
import tensorflow as tf
from tf3d import standard_fields
from tf3d.losses import utils as loss_utils
from tf3d.utils import instance_segmentation_utils
from tf3d.utils import mask_utils
from tf3d.utils import metric_learning_utils
from tf3d.utils import sampling_utils
def _get_voxels_valid_mask(inputs_1):
  """Computes the boolean mask selecting valid (non-padded) voxels.

  Args:
    inputs_1: A dictionary of input tensors for a single (unbatched) example.

  Returns:
    The mask produced by mask_utils.num_voxels_mask for this example.
  """
  valid_mask = mask_utils.num_voxels_mask(inputs=inputs_1)
  return valid_mask
def _get_voxels_valid_inputs_outputs(inputs_1, outputs_1):
  """Restricts input and output voxel tensors to the valid voxels.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example; its voxel
      tensors are masked in place.

  Returns:
    A (masked inputs, outputs) tuple.
  """
  mask = _get_voxels_valid_mask(inputs_1=inputs_1)
  masked_inputs = mask_utils.apply_mask_to_input_voxel_tensors(
      inputs=inputs_1, valid_mask=mask)
  # Output voxel tensors are modified in place by the helper.
  mask_utils.apply_mask_to_output_voxel_tensors(
      outputs=outputs_1, valid_mask=mask)
  return masked_inputs, outputs_1
def classification_loss_fn(logits,
                           labels,
                           num_valid_voxels=None,
                           weights=1.0):
  """Semantic segmentation cross entropy loss.

  Accepts either unbatched rank-2 inputs ([n, num_classes]) or batched
  rank-3 inputs ([batch_size, n, num_classes]). In the batched case,
  `num_valid_voxels` is required so padded voxels at the tail of each
  example are dropped before computing the loss.

  Args:
    logits: A tf.float32 tensor of size [n, num_classes] or
      [batch_size, n, num_classes].
    labels: A tensor of the same rank as `logits`; either one-hot targets
      (last dimension num_classes) or sparse class ids (last dimension 1).
    num_valid_voxels: A tf.int32 tensor of size [batch_size] with the number
      of valid voxels per example; must be None for rank-2 inputs.
    weights: Per-voxel loss weights (tensor) or a scalar applied uniformly.

  Returns:
    A scalar tf.float32 loss tensor.

  Raises:
    ValueError: If logits and labels disagree in rank, the rank is not 2 or
      3, or `num_valid_voxels` is inconsistent with the rank.
  """
  logits_rank = len(logits.get_shape().as_list())
  labels_rank = len(labels.get_shape().as_list())
  if logits_rank != labels_rank:
    raise ValueError(
        'Logits and labels should have the same rank.')
  if logits_rank != 2 and logits_rank != 3:
    raise ValueError('Logits and labels should have either 2 or 3 dimensions.')
  if logits_rank == 2:
    if num_valid_voxels is not None:
      raise ValueError(
          '`num_valid_voxels` should be None if not using batched logits.')
  elif logits_rank == 3:
    if num_valid_voxels is None:
      raise ValueError(
          '`num_valid_voxels` cannot be None if using batched logits.')
  if logits_rank == 3:
    # Per-voxel weights are only honored when `weights` is itself a rank-3
    # tensor; otherwise the (scalar) weights are applied after flattening.
    if (isinstance(weights, tf.Tensor) and
        len(weights.get_shape().as_list()) == 3):
      use_weights = True
    else:
      use_weights = False
    batch_size = logits.get_shape().as_list()[0]
    logits_list = []
    labels_list = []
    weights_list = []
    # Drop padded voxels of each example, then flatten the batch dimension.
    for i in range(batch_size):
      num_valid_voxels_i = num_valid_voxels[i]
      logits_list.append(logits[i, 0:num_valid_voxels_i, :])
      labels_list.append(labels[i, 0:num_valid_voxels_i, :])
      if use_weights:
        weights_list.append(weights[i, 0:num_valid_voxels_i, :])
    logits = tf.concat(logits_list, axis=0)
    labels = tf.concat(labels_list, axis=0)
    if use_weights:
      weights = tf.concat(weights_list, axis=0)
  weights = tf.convert_to_tensor(weights, dtype=tf.float32)
  if labels.get_shape().as_list()[-1] == 1:
    # Sparse class ids: convert to one-hot targets.
    num_classes = logits.get_shape().as_list()[-1]
    labels = tf.one_hot(tf.reshape(labels, shape=[-1]), num_classes)
  losses = tf.nn.softmax_cross_entropy_with_logits(
      labels=tf.stop_gradient(labels), logits=logits)
  # NOTE(review): this is a weighted mean over voxels; it is not normalized
  # by the sum of the weights.
  return tf.reduce_mean(losses * tf.reshape(weights, [-1]))
@gin.configurable('classification_loss', denylist=['inputs', 'outputs'])
def classification_loss(inputs, outputs):
  """Applies categorical crossentropy loss to voxel predictions.

  Args:
    inputs: A dictionary of `Tensors` that contains ground-truth.
    outputs: A dictionary of `Tensors` that contains predictions.

  Returns:
    The loss `Tensor`.

  Raises:
    ValueError: If the voxel labels or logits are missing from the
      corresponding dictionary.
  """
  labels_key = standard_fields.InputDataFields.object_class_voxels
  logits_key = standard_fields.DetectionResultFields.object_semantic_voxels
  if labels_key not in inputs:
    raise ValueError('`object_class_voxels` is missing in inputs.')
  if logits_key not in outputs:
    raise ValueError('`object_semantic_voxels` is missing in outputs.')
  # Optional fields default to unbatched (None) and uniform weighting (1.0).
  num_valid_voxels = inputs.get(
      standard_fields.InputDataFields.num_valid_voxels, None)
  weights = inputs.get(
      standard_fields.InputDataFields.voxel_loss_weights, 1.0)
  return classification_loss_fn(
      logits=outputs[logits_key],
      labels=inputs[labels_key],
      num_valid_voxels=num_valid_voxels,
      weights=weights)
def _box_classification_loss_unbatched(inputs_1, outputs_1, is_intermediate,
                                       is_balanced, mine_hard_negatives,
                                       hard_negative_score_threshold):
  """Loss function for input and outputs of batch size 1.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    is_intermediate: If True, reads the intermediate semantic logits.
    is_balanced: If True, re-weights voxels so instances contribute equally.
    mine_hard_negatives: If True, appends duplicates of hard-negative voxels
      so they count twice in the loss.
    hard_negative_score_threshold: Background-labeled voxels whose predicted
      background probability falls below this value are hard negatives.

  Returns:
    A scalar tf.float32 classification loss tensor.

  Raises:
    ValueError: If the number of classes in the logits is unknown.
  """
  valid_mask = _get_voxels_valid_mask(inputs_1=inputs_1)
  if is_intermediate:
    logits = outputs_1[standard_fields.DetectionResultFields
                       .intermediate_object_semantic_voxels]
  else:
    logits = outputs_1[
        standard_fields.DetectionResultFields.object_semantic_voxels]
  num_classes = logits.get_shape().as_list()[-1]
  if num_classes is None:
    raise ValueError('Number of classes is unknown.')
  # Keep only valid voxels.
  logits = tf.boolean_mask(tf.reshape(logits, [-1, num_classes]), valid_mask)
  labels = tf.boolean_mask(
      tf.reshape(
          inputs_1[standard_fields.InputDataFields.object_class_voxels],
          [-1, 1]), valid_mask)
  if mine_hard_negatives or is_balanced:
    instances = tf.boolean_mask(
        tf.reshape(
            inputs_1[standard_fields.InputDataFields.object_instance_id_voxels],
            [-1]), valid_mask)
  params = {}
  if mine_hard_negatives:
    # A hard negative is a background voxel (label 0) whose predicted
    # background probability is below the threshold.
    negative_scores = tf.reshape(tf.nn.softmax(logits)[:, 0], [-1])
    hard_negative_mask = tf.logical_and(
        tf.less(negative_scores, hard_negative_score_threshold),
        tf.equal(tf.reshape(labels, [-1]), 0))
    hard_negative_labels = tf.boolean_mask(labels, hard_negative_mask)
    hard_negative_logits = tf.boolean_mask(logits, hard_negative_mask)
    # Hard negatives receive a fresh instance id so balancing treats them
    # as their own group.
    hard_negative_instances = tf.boolean_mask(
        tf.ones_like(instances) * (tf.reduce_max(instances) + 1),
        hard_negative_mask)
    # Appending duplicates doubles the weight of the hard negatives.
    logits = tf.concat([logits, hard_negative_logits], axis=0)
    instances = tf.concat([instances, hard_negative_instances], axis=0)
    labels = tf.concat([labels, hard_negative_labels], axis=0)
  if is_balanced:
    weights = loss_utils.get_balanced_loss_weights_multiclass(
        labels=tf.expand_dims(instances, axis=1))
    params['weights'] = weights
  return classification_loss_fn(
      logits=logits,
      labels=labels,
      **params)
@gin.configurable
def box_classification_loss(inputs,
                            outputs,
                            is_intermediate=False,
                            is_balanced=False,
                            mine_hard_negatives=False,
                            hard_negative_score_threshold=0.5):
  """Calculates the voxel level classification loss.

  Args:
    inputs: A dictionary of tf.Tensors with our input label data.
    outputs: A dictionary of tf.Tensors with the network output.
    is_intermediate: If True, loss will be computed on intermediate tensors.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.
    mine_hard_negatives: If True, mines hard negatives and applies loss on
      them too.
    hard_negative_score_threshold: A prediction is a hard negative if its
      label is 0 and the score for the 0 class is less than this threshold.

  Returns:
    loss: A tf.float32 scalar corresponding to softmax classification loss.

  Raises:
    ValueError: If the size of the third dimension of the predicted logits is
      unknown at graph construction.
  """
  # Bind the configuration into the per-example loss and let the helper
  # unbatch, apply and aggregate it.
  unbatched_loss_fn = functools.partial(
      _box_classification_loss_unbatched,
      is_intermediate=is_intermediate,
      is_balanced=is_balanced,
      mine_hard_negatives=mine_hard_negatives,
      hard_negative_score_threshold=hard_negative_score_threshold)
  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_loss_fn)
def _box_classification_using_center_distance_loss_unbatched(
    inputs_1,
    outputs_1,
    is_intermediate,
    is_balanced,
    max_positive_normalized_distance):
  """Loss function for input and outputs of batch size 1.

  Softens the foreground classification targets by how far the predicted
  box is from the ground-truth box: targets decay linearly to background as
  the normalized corner distance grows toward
  `max_positive_normalized_distance`.

  Args:
    inputs_1: Input tensor dictionary for a single example.
    outputs_1: Output tensor dictionary for a single example.
    is_intermediate: If True, reads the intermediate prediction tensors.
    is_balanced: If True, re-weights voxels so instances contribute equally.
    max_positive_normalized_distance: Normalized distance beyond which a
      predicted box is treated as fully background.

  Returns:
    A scalar tf.float32 loss tensor (0.0 when there are no valid voxels).

  Raises:
    ValueError: If the number of classes in the logits is unknown.
  """
  inputs_1, outputs_1 = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  if is_intermediate:
    output_object_centers = outputs_1[
        standard_fields.DetectionResultFields.intermediate_object_center_voxels]
    output_object_length = outputs_1[
        standard_fields.DetectionResultFields.intermediate_object_length_voxels]
    output_object_height = outputs_1[
        standard_fields.DetectionResultFields.intermediate_object_height_voxels]
    output_object_width = outputs_1[
        standard_fields.DetectionResultFields.intermediate_object_width_voxels]
    output_object_rotation_matrix = outputs_1[
        standard_fields.DetectionResultFields
        .intermediate_object_rotation_matrix_voxels]
    logits = outputs_1[standard_fields.DetectionResultFields
                       .intermediate_object_semantic_voxels]
  else:
    output_object_centers = outputs_1[
        standard_fields.DetectionResultFields.object_center_voxels]
    output_object_length = outputs_1[
        standard_fields.DetectionResultFields.object_length_voxels]
    output_object_height = outputs_1[
        standard_fields.DetectionResultFields.object_height_voxels]
    output_object_width = outputs_1[
        standard_fields.DetectionResultFields.object_width_voxels]
    output_object_rotation_matrix = outputs_1[
        standard_fields.DetectionResultFields.object_rotation_matrix_voxels]
    logits = outputs_1[
        standard_fields.DetectionResultFields.object_semantic_voxels]
  # Per-voxel distance between the predicted and ground-truth boxes,
  # normalized by the helper in loss_utils.
  normalized_center_distance = loss_utils.get_normalized_corner_distances(
      predicted_boxes_center=output_object_centers,
      predicted_boxes_length=output_object_length,
      predicted_boxes_height=output_object_height,
      predicted_boxes_width=output_object_width,
      predicted_boxes_rotation_matrix=output_object_rotation_matrix,
      gt_boxes_center=inputs_1[
          standard_fields.InputDataFields.object_center_voxels],
      gt_boxes_length=inputs_1[
          standard_fields.InputDataFields.object_length_voxels],
      gt_boxes_height=inputs_1[
          standard_fields.InputDataFields.object_height_voxels],
      gt_boxes_width=inputs_1[
          standard_fields.InputDataFields.object_width_voxels],
      gt_boxes_rotation_matrix=inputs_1[
          standard_fields.InputDataFields.object_rotation_matrix_voxels])
  labels = tf.reshape(
      inputs_1[standard_fields.InputDataFields.object_class_voxels], [-1])
  instances = tf.reshape(
      inputs_1[standard_fields.InputDataFields.object_instance_id_voxels], [-1])
  params = {}
  if is_balanced:
    weights = loss_utils.get_balanced_loss_weights_multiclass(
        labels=tf.expand_dims(instances, axis=1))
    params['weights'] = weights

  def loss_fn():
    """Loss function."""
    num_classes = logits.get_shape().as_list()[-1]
    if num_classes is None:
      raise ValueError('Number of classes is unknown.')
    # One-hot over foreground classes only (class ids are shifted by one
    # because class 0 is background).
    labels_one_hot = tf.one_hot(indices=(labels - 1), depth=(num_classes - 1))
    # Linear decay from 1 (zero distance) to 0 (distance at or beyond the
    # maximum positive distance).
    inverse_distance_coef = tf.maximum(
        tf.minimum(
            1.0 - normalized_center_distance / max_positive_normalized_distance,
            1.0), 0.0)
    labels_one_hot = tf.reshape(inverse_distance_coef, [-1, 1]) * labels_one_hot
    # Whatever mass was removed from the foreground goes to background.
    background_label = 1.0 - tf.math.reduce_sum(
        labels_one_hot, axis=1, keepdims=True)
    labels_one_hot = tf.concat([background_label, labels_one_hot], axis=1)
    loss = classification_loss_fn(
        logits=logits,
        labels=labels_one_hot,
        **params)
    return loss

  # Guard against an example with no valid voxels.
  return tf.cond(
      tf.greater(tf.shape(labels)[0], 0), loss_fn,
      lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable
def box_classification_using_center_distance_loss(
    inputs,
    outputs,
    is_intermediate=False,
    is_balanced=False,
    max_positive_normalized_distance=0.3):
  """Calculates the loss based on predicted center distance from gt center.

  Computes the loss using the object properties of the voxel tensors.

  Args:
    inputs: A dictionary of tf.Tensors with our input label data.
    outputs: A dictionary of tf.Tensors with the network output.
    is_intermediate: If True, loss will be computed on intermediate tensors.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.
    max_positive_normalized_distance: Maximum distance of a predicted box
      from the ground-truth box that we use to classify the predicted box as
      positive.

  Returns:
    loss: A tf.float32 scalar corresponding to distance confidence loss.
  """
  # Bind the configuration into the per-example loss and delegate the
  # unbatching/aggregation to the shared helper.
  unbatched_loss_fn = functools.partial(
      _box_classification_using_center_distance_loss_unbatched,
      is_intermediate=is_intermediate,
      is_balanced=is_balanced,
      max_positive_normalized_distance=max_positive_normalized_distance)
  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs, outputs=outputs, unbatched_loss_fn=unbatched_loss_fn)
def classification_loss_using_mask_iou_func_unbatched(
    embeddings, instance_ids, sampled_embeddings,
    sampled_instance_ids, sampled_class_labels, sampled_logits,
    similarity_strategy, is_balanced):
  """Classification loss using mask iou.

  Args:
    embeddings: A tf.float32 tensor of size [n, f].
    instance_ids: A tf.int32 tensor of size [n].
    sampled_embeddings: A tf.float32 tensor of size [num_samples, f].
    sampled_instance_ids: A tf.int32 tensor of size [num_samples].
    sampled_class_labels: A tf.int32 tensor of size [num_samples, 1].
    sampled_logits: A tf.float32 tensor of size [num_samples, num_classes].
    similarity_strategy: Defines the method for computing similarity between
      embedding vectors. Possible values are 'dotproduct' and 'distance'.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.

  Returns:
    A tf.float32 loss scalar tensor.
  """
  # Predicted soft mask per sampled seed: similarity of each voxel
  # embedding to the seed embedding.
  predicted_soft_masks = metric_learning_utils.embedding_centers_to_soft_masks(
      embedding=embeddings,
      centers=sampled_embeddings,
      similarity_strategy=similarity_strategy)
  # Binarize at 0.5 to obtain hard predicted masks.
  predicted_masks = tf.cast(
      tf.greater(predicted_soft_masks, 0.5), dtype=tf.float32)
  # Ground-truth mask per seed: voxels sharing the seed's instance id.
  gt_masks = tf.cast(
      tf.equal(
          tf.expand_dims(sampled_instance_ids, axis=1),
          tf.expand_dims(instance_ids, axis=0)),
      dtype=tf.float32)
  pairwise_iou = instance_segmentation_utils.points_mask_pairwise_iou(
      masks1=predicted_masks, masks2=gt_masks)
  num_classes = sampled_logits.get_shape().as_list()[1]
  sampled_class_labels_one_hot = tf.one_hot(
      indices=tf.reshape(sampled_class_labels, [-1]), depth=num_classes)
  # Scale the foreground targets by the mask IoU: poorly predicted masks
  # receive a softer (more background-like) classification target.
  sampled_class_labels_one_hot_fg = sampled_class_labels_one_hot[:, 1:]
  iou_coefs = tf.tile(tf.reshape(pairwise_iou, [-1, 1]), [1, num_classes - 1])
  sampled_class_labels_one_hot_fg *= iou_coefs
  # The background probability absorbs the mass removed from foreground.
  sampled_class_labels_one_hot_bg = tf.maximum(1.0 - tf.math.reduce_sum(
      sampled_class_labels_one_hot_fg, axis=1, keepdims=True), 0.0)
  sampled_class_labels_one_hot = tf.concat(
      [sampled_class_labels_one_hot_bg, sampled_class_labels_one_hot_fg],
      axis=1)
  params = {}
  if is_balanced:
    weights = loss_utils.get_balanced_loss_weights_multiclass(
        labels=tf.expand_dims(sampled_instance_ids, axis=1))
    params['weights'] = weights
  return classification_loss_fn(
      logits=sampled_logits, labels=sampled_class_labels_one_hot, **params)
def classification_loss_using_mask_iou_func(embeddings,
                                            logits,
                                            instance_ids,
                                            class_labels,
                                            num_samples,
                                            valid_mask=None,
                                            max_instance_id=None,
                                            similarity_strategy='dotproduct',
                                            is_balanced=True):
  """Classification loss using mask iou.

  Args:
    embeddings: A tf.float32 tensor of size [batch_size, n, f].
    logits: A tf.float32 tensor of size [batch_size, n, num_classes]. It is
      assumed that background is class 0.
    instance_ids: A tf.int32 tensor of size [batch_size, n].
    class_labels: A tf.int32 tensor of size [batch_size, n]. It is assumed
      that the background voxels are assigned to class 0.
    num_samples: An int determining the number of samples.
    valid_mask: A tf.bool tensor of size [batch_size, n] that is True when an
      element is valid and False if it needs to be ignored. By default the
      value is None which means it is not applied.
    max_instance_id: If set, instance ids larger than that value will be
      ignored. If not set, it will be computed from instance_ids tensor.
    similarity_strategy: Defines the method for computing similarity between
      embedding vectors. Possible values are 'dotproduct' and 'distance'.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.

  Returns:
    A tf.float32 scalar loss tensor.

  Raises:
    ValueError: If the batch size is unknown at graph construction time.
  """
  batch_size = embeddings.get_shape().as_list()[0]
  if batch_size is None:
    raise ValueError('Unknown batch size at graph construction time.')
  if max_instance_id is None:
    max_instance_id = tf.reduce_max(instance_ids)
  class_labels = tf.reshape(class_labels, [batch_size, -1, 1])
  # Sample seed voxels with a per-instance balanced sampler; the loss is
  # evaluated only at the sampled seeds.
  sampled_embeddings, sampled_instance_ids, sampled_indices = (
      sampling_utils.balanced_sample(
          features=embeddings,
          instance_ids=instance_ids,
          num_samples=num_samples,
          valid_mask=valid_mask,
          max_instance_id=max_instance_id))
  losses = []
  # Compute the loss example by example and average over the batch.
  for i in range(batch_size):
    embeddings_i = embeddings[i, :, :]
    instance_ids_i = instance_ids[i, :]
    class_labels_i = class_labels[i, :, :]
    logits_i = logits[i, :]
    sampled_embeddings_i = sampled_embeddings[i, :, :]
    sampled_instance_ids_i = sampled_instance_ids[i, :]
    sampled_indices_i = sampled_indices[i, :]
    sampled_class_labels_i = tf.gather(class_labels_i, sampled_indices_i)
    sampled_logits_i = tf.gather(logits_i, sampled_indices_i)
    if valid_mask is not None:
      # Restrict the full (non-sampled) tensors to valid voxels; the
      # sampler above has already respected the mask for the samples.
      valid_mask_i = valid_mask[i]
      embeddings_i = tf.boolean_mask(embeddings_i, valid_mask_i)
      instance_ids_i = tf.boolean_mask(instance_ids_i, valid_mask_i)
    loss_i = classification_loss_using_mask_iou_func_unbatched(
        embeddings=embeddings_i,
        instance_ids=instance_ids_i,
        sampled_embeddings=sampled_embeddings_i,
        sampled_instance_ids=sampled_instance_ids_i,
        sampled_class_labels=sampled_class_labels_i,
        sampled_logits=sampled_logits_i,
        similarity_strategy=similarity_strategy,
        is_balanced=is_balanced)
    losses.append(loss_i)
  return tf.math.reduce_mean(tf.stack(losses))
@gin.configurable(
    'classification_loss_using_mask_iou', denylist=['inputs', 'outputs'])
def classification_loss_using_mask_iou(inputs,
                                       outputs,
                                       num_samples,
                                       max_instance_id=None,
                                       similarity_strategy='distance',
                                       is_balanced=True,
                                       is_intermediate=False):
  """Classification loss with an iou threshold.

  Args:
    inputs: A dictionary that contains
      num_valid_voxels - A tf.int32 tensor of size [batch_size].
      instance_ids - A tf.int32 tensor of size [batch_size, n].
      class_labels - A tf.int32 tensor of size [batch_size, n]. It is assumed
        that the background voxels are assigned to class 0.
    outputs: A dictionary that contains
      embeddings - A tf.float32 tensor of size [batch_size, n, f].
      logits - A tf.float32 tensor of size [batch_size, n, num_classes]. It is
        assumed that background is class 0.
    num_samples: An int determining the number of samples.
    max_instance_id: If set, instance ids larger than that value will be
      ignored. If not set, it will be computed from instance_ids tensor.
    similarity_strategy: Defines the method for computing similarity between
      embedding vectors. Possible values are 'dotproduct' and 'distance'.
    is_balanced: If True, the per-voxel losses are re-weighted to have equal
      total weight for foreground vs. background voxels.
    is_intermediate: True if applied to intermediate predictions;
      otherwise, False.

  Returns:
    A tf.float32 scalar loss tensor.

  Raises:
    ValueError: If a required tensor is missing from inputs/outputs, or if
      the batch size is unknown at graph construction time.
  """
  instance_ids_key = standard_fields.InputDataFields.object_instance_id_voxels
  class_labels_key = standard_fields.InputDataFields.object_class_voxels
  num_voxels_key = standard_fields.InputDataFields.num_valid_voxels
  # Intermediate predictions are stored under separate result fields.
  if is_intermediate:
    embedding_key = (
        standard_fields.DetectionResultFields
        .intermediate_instance_embedding_voxels)
    logits_key = (
        standard_fields.DetectionResultFields
        .intermediate_object_semantic_voxels)
  else:
    embedding_key = (
        standard_fields.DetectionResultFields.instance_embedding_voxels)
    logits_key = standard_fields.DetectionResultFields.object_semantic_voxels
  # Validate that every tensor the loss depends on is present.
  required_input_keys = (
      (instance_ids_key, 'instance_ids is missing in inputs.'),
      (class_labels_key, 'class_labels is missing in inputs.'),
      (num_voxels_key, 'num_voxels is missing in inputs.'),
  )
  for key, error_message in required_input_keys:
    if key not in inputs:
      raise ValueError(error_message)
  required_output_keys = (
      (embedding_key, 'embedding is missing in outputs.'),
      (logits_key, 'logits is missing in outputs.'),
  )
  for key, error_message in required_output_keys:
    if key not in outputs:
      raise ValueError(error_message)
  batch_size = inputs[num_voxels_key].get_shape().as_list()[0]
  if batch_size is None:
    raise ValueError('batch_size is not defined at graph construction time.')
  num_valid_voxels = inputs[num_voxels_key]
  num_voxels = tf.shape(inputs[instance_ids_key])[1]
  # A voxel slot is valid when its index is below the example's valid count.
  valid_mask = tf.less(
      tf.tile(tf.expand_dims(tf.range(num_voxels), axis=0), [batch_size, 1]),
      tf.expand_dims(num_valid_voxels, axis=1))
  return classification_loss_using_mask_iou_func(
      embeddings=outputs[embedding_key],
      logits=outputs[logits_key],
      instance_ids=tf.reshape(inputs[instance_ids_key], [batch_size, -1]),
      class_labels=inputs[class_labels_key],
      num_samples=num_samples,
      valid_mask=valid_mask,
      max_instance_id=max_instance_id,
      similarity_strategy=similarity_strategy,
      is_balanced=is_balanced)
def _voxel_hard_negative_classification_loss_unbatched(
    inputs_1, outputs_1, is_intermediate, gamma):
  """Loss function for input and outputs of batch size 1.

  Applies a focal-style classification loss only to the background-labeled
  voxels (label 0), weighting each voxel by (1 - p_background)^gamma so that
  confidently-background voxels contribute little and hard negatives dominate.

  Args:
    inputs_1: A dictionary of input tensors for a single example.
    outputs_1: A dictionary of network output tensors for a single example.
    is_intermediate: If True, reads logits from the intermediate semantic
      prediction field instead of the final one.
    gamma: Exponent applied to (1 - p_background), as in focal loss.

  Returns:
    A tf.float32 scalar loss tensor (0.0 when there are no background voxels
    or no labels at all).
  """
  # Restrict both dictionaries to the valid voxels of this example.
  inputs_1, outputs_1 = _get_voxels_valid_inputs_outputs(
      inputs_1=inputs_1, outputs_1=outputs_1)
  if is_intermediate:
    logits = outputs_1[standard_fields.DetectionResultFields
                       .intermediate_object_semantic_voxels]
  else:
    logits = outputs_1[
        standard_fields.DetectionResultFields.object_semantic_voxels]
  labels = tf.reshape(
      inputs_1[standard_fields.InputDataFields.object_class_voxels], [-1])
  # Class 0 is background by convention; only those voxels incur this loss.
  background_mask = tf.equal(labels, 0)
  num_background_points = tf.reduce_sum(
      tf.cast(background_mask, dtype=tf.int32))

  def loss_fn():
    """Loss function."""
    num_classes = logits.get_shape().as_list()[-1]
    if num_classes is None:
      raise ValueError('Number of classes is unknown.')
    masked_logits = tf.boolean_mask(logits, background_mask)
    # Focal weighting: softmax column 0 is the background probability.
    masked_weights = tf.pow(
        1.0 - tf.reshape(tf.nn.softmax(masked_logits)[:, 0], [-1, 1]), gamma)
    num_points = tf.shape(masked_logits)[0]
    # Normalize weights so their total equals the number of masked points.
    masked_weights = masked_weights * tf.cast(
        num_points, dtype=tf.float32) / tf.reduce_sum(masked_weights)
    masked_labels_one_hot = tf.one_hot(
        indices=tf.boolean_mask(labels, background_mask), depth=num_classes)
    loss = classification_loss_fn(
        logits=masked_logits,
        labels=masked_labels_one_hot,
        weights=masked_weights)
    return loss

  # tf.cond evaluates loss_fn lazily, so the masked ops only run when there
  # is at least one background voxel and the label tensor is non-empty.
  cond = tf.logical_and(
      tf.greater(num_background_points, 0), tf.greater(tf.shape(labels)[0], 0))
  return tf.cond(cond, loss_fn, lambda: tf.constant(0.0, dtype=tf.float32))
@gin.configurable
def hard_negative_classification_loss(
    inputs,
    outputs,
    is_intermediate=False,
    gamma=1.0):
  """Hard-negative (background) classification loss over voxel tensors.

  Applies `_voxel_hard_negative_classification_loss_unbatched` to each
  example in the batch via the shared unbatched-loss helper.

  Args:
    inputs: A dictionary of tf.Tensors with our input label data.
    outputs: A dictionary of tf.Tensors with the network output.
    is_intermediate: If True, loss will be computed on intermediate tensors.
    gamma: Gamma similar to how it is used in focal loss.

  Returns:
    loss: A tf.float32 scalar loss tensor.
  """
  unbatched_loss_fn = functools.partial(
      _voxel_hard_negative_classification_loss_unbatched,
      is_intermediate=is_intermediate,
      gamma=gamma)
  return loss_utils.apply_unbatched_loss_on_voxel_tensors(
      inputs=inputs,
      outputs=outputs,
      unbatched_loss_fn=unbatched_loss_fn)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
306b962664a9d20947605344247f11e3ae9b082a | 674f5dde693f1a60e4480e5b66fba8f24a9cb95d | /armulator/armv6/opcodes/concrete/orn_immediate_t1.py | 106ef9da7793ef683dc7f799764d54ac9acacd72 | [
"MIT"
] | permissive | matan1008/armulator | 75211c18ebc9cd9d33a02890e76fc649483c3aad | 44f4275ab1cafff3cf7a1b760bff7f139dfffb07 | refs/heads/master | 2023-08-17T14:40:52.793120 | 2023-08-08T04:57:02 | 2023-08-08T04:57:02 | 91,716,042 | 29 | 7 | MIT | 2023-08-08T04:55:59 | 2017-05-18T16:37:55 | Python | UTF-8 | Python | false | false | 804 | py | from armulator.armv6.bits_ops import substring, bit_at, chain
from armulator.armv6.opcodes.abstract_opcodes.orn_immediate import OrnImmediate
from armulator.armv6.shift import thumb_expand_imm_c
class OrnImmediateT1(OrnImmediate):
    """Decoder for the ORN (immediate) instruction, Thumb encoding T1."""

    @staticmethod
    def from_bitarray(instr, processor):
        """Decode a 32-bit Thumb-2 ORN (immediate) instruction word.

        Extracts the register fields and the modified immediate
        (i:imm3:imm8 expanded via ThumbExpandImm_C with the current carry),
        and returns a populated OrnImmediateT1 opcode instance.
        """
        imm8 = substring(instr, 7, 0)
        rd = substring(instr, 11, 8)
        imm3 = substring(instr, 14, 12)
        rn = substring(instr, 19, 16)
        setflags = bit_at(instr, 20)
        i = bit_at(instr, 26)
        # Assemble the 12-bit encoded immediate as i:imm3:imm8 and expand it;
        # carry-in comes from the current CPSR.C flag.
        imm32, carry = thumb_expand_imm_c(chain(i, chain(imm3, imm8, 8), 11), processor.registers.cpsr.c)
        # Per the ARM ARM, d IN {13,15} or n == 13 makes this UNPREDICTABLE
        # (n == 15 is the MVN encoding, handled elsewhere).
        if rd in (13, 15) or rn == 13:
            print('unpredictable')
            # NOTE(review): falls through returning None on the unpredictable
            # path — presumably callers tolerate this; confirm against the
            # other opcode decoders in this package.
        else:
            return OrnImmediateT1(instr, setflags=setflags, d=rd, n=rn, imm32=imm32, carry=carry)
| [
"matan1008@gmail.com"
] | matan1008@gmail.com |
f68283bb9341a87ddbd9066b3c3b1132379fc85c | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/boosters/BoostersPanelComponent.py | 0af565b33b7e60018dbab49813b2dd2d499360f8 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 5,913 | py | # 2017.08.29 21:46:41 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/boosters/BoostersPanelComponent.py
from gui.ClientUpdateManager import g_clientUpdateManager
from gui.Scaleform.daapi.view.meta.SlotsPanelMeta import SlotsPanelMeta
from gui.Scaleform.genConsts.BOOSTER_CONSTANTS import BOOSTER_CONSTANTS
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.Scaleform.locale.TOOLTIPS import TOOLTIPS
from gui.goodies.goodie_items import MAX_ACTIVE_BOOSTERS_COUNT
from gui.shared.utils.functions import makeTooltip
from gui.shared.utils.requesters.ItemsRequester import REQ_CRITERIA
from helpers import dependency
from skeletons.gui.game_control import IBoostersController
from skeletons.gui.goodies import IGoodiesCache
# Layout properties handed to the Flash slots panel (as_setPanelPropsS):
# slot count, pixel sizing and spacing of the booster slots.
_GUI_SLOTS_PROPS = {'slotsCount': MAX_ACTIVE_BOOSTERS_COUNT,
 'slotWidth': 50,
 'paddings': 64,
 'groupPadding': 18,
 'ySlotPosition': 5,
 'offsetSlot': 13,
 'useOnlyLeftBtn': True}
# Sentinel slot ids used in place of a real booster id for empty slots.
ADD_BOOSTER_ID = 'add'
_ADD_AVAILABLE_BOOSTER_ID = 'addAvailable'
_EMPTY_BOOSTER_ID = 'empty'
class BoostersPanelComponent(SlotsPanelMeta):
    """Lobby panel showing the active booster slots.

    Renders up to MAX_ACTIVE_BOOSTERS_COUNT slots: active boosters first
    (sorted by remaining usage time), then "add" placeholders for available
    boosters, then plain empty slots. Rebuilds itself whenever the goodies
    data changes.
    """
    # Injected dependencies (resolved by the dependency framework).
    boosters = dependency.descriptor(IBoostersController)
    goodiesCache = dependency.descriptor(IGoodiesCache)

    def __init__(self):
        super(BoostersPanelComponent, self).__init__()
        # When True the panel is rendered in its inactive (greyed) state.
        self._isPanelInactive = True
        # Set after the first _populate so setSettings can rebuild safely.
        self._wasPopulated = False
        # Maps slot index -> booster id (or a sentinel id for empty slots).
        self._slotsMap = {}

    def setSettings(self, isPanelInactive = True):
        """Update the active/inactive state and rebuild if already shown."""
        self._isPanelInactive = isPanelInactive
        if self._wasPopulated:
            self._buildList()

    def getBoosterSlotID(self, idx):
        """Return the booster/sentinel id for slot *idx*, or None if unknown."""
        return self._slotsMap.get(int(idx), None)

    def getSlotTooltipBody(self, slotIdx):
        """Return the tooltip id/body for the slot at *slotIdx*."""
        boosterID = self._slotsMap.get(int(slotIdx), None)
        tooltip = ''
        if boosterID in (ADD_BOOSTER_ID, _ADD_AVAILABLE_BOOSTER_ID):
            # Empty slot: only offer the "open boosters window" hint when
            # the panel is interactive.
            if not self._isPanelInactive:
                body = TOOLTIPS.BOOSTERSPANEL_OPENBOOSTERSWINDOW_BODY
                tooltip = makeTooltip(None, body)
        else:
            tooltip = TOOLTIPS_CONSTANTS.BOOSTERS_BOOSTER_INFO
        return tooltip

    def _populate(self):
        # Subscribe to goodies updates before the first build.
        super(BoostersPanelComponent, self)._populate()
        g_clientUpdateManager.addCallbacks({'goodies': self.__onUpdateGoodies})
        self.boosters.onBoosterChangeNotify += self.__onUpdateGoodies
        self._buildList()
        self._wasPopulated = True

    def _dispose(self):
        # Drop state and unsubscribe in reverse order of _populate.
        self._isPanelInactive = None
        self._wasPopulated = None
        self._slotsMap = None
        self.boosters.onBoosterChangeNotify -= self.__onUpdateGoodies
        g_clientUpdateManager.removeObjectCallbacks(self)
        super(BoostersPanelComponent, self)._dispose()
        return

    def __getAvailableBoosters(self):
        """Return boosters that are ready to be activated."""
        criteria = REQ_CRITERIA.BOOSTER.IS_READY_TO_ACTIVATE
        return self.goodiesCache.getBoosters(criteria=criteria)

    def _buildList(self):
        """Rebuild _slotsMap and push slot VOs to the Flash view."""
        result = []
        activeBoosters = self.goodiesCache.getBoosters(criteria=REQ_CRITERIA.BOOSTER.ACTIVE)
        # Longest remaining usage time first.
        activeBoostersList = sorted(activeBoosters.values(), key=lambda b: b.getUsageLeftTime(), reverse=True)
        availableBoostersCount = len(self.__getAvailableBoosters())
        activeBoostersCount = min(len(activeBoostersList), MAX_ACTIVE_BOOSTERS_COUNT)
        freeSlotsCount = MAX_ACTIVE_BOOSTERS_COUNT - min(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT)
        # Only as many "add" placeholders as there are boosters to activate.
        addBoostersSlotsCount = min(freeSlotsCount, availableBoostersCount)
        self._slotsMap = {}
        for idx in range(0, activeBoostersCount):
            booster = activeBoostersList[idx]
            self._slotsMap[idx] = booster.boosterID
            result.append(self.__makeBoosterVO(idx, booster))
        icon = ''
        if not self._isPanelInactive:
            icon = RES_ICONS.MAPS_ICONS_ARTEFACT_EMPTYORDER
        addAndActiveBoostersCount = activeBoostersCount + addBoostersSlotsCount
        # Fill the remaining slots with "add" or plain-empty placeholders.
        for idx in range(activeBoostersCount, MAX_ACTIVE_BOOSTERS_COUNT):
            self._slotsMap[idx], slotLinkage = self.getEmptySlotParams(idx, addAndActiveBoostersCount)
            result.append(self.__makeEmptyBoosterVO(idx, slotLinkage, icon))
        self.as_setPanelPropsS(_GUI_SLOTS_PROPS)
        self.as_setSlotsS(result)

    def getEmptySlotParams(self, idx, addAndActiveBoostersCount):
        """Return (sentinel id, UI linkage) for the empty slot at *idx*."""
        if idx < addAndActiveBoostersCount and not self._isPanelInactive:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_ADD_UI
            emptyBoosterID = _ADD_AVAILABLE_BOOSTER_ID
        else:
            slotLinkage = BOOSTER_CONSTANTS.SLOT_UI
            emptyBoosterID = ADD_BOOSTER_ID
        return (emptyBoosterID, slotLinkage)

    def __makeBoosterVO(self, idx, booster):
        """Build the view object dict for an active booster slot."""
        return {'boosterId': booster.boosterID,
         'id': str(idx),
         'icon': booster.icon,
         'inCooldown': booster.inCooldown,
         'cooldownPercent': booster.getCooldownAsPercent(),
         'leftTime': booster.getUsageLeftTime(),
         'leftTimeText': booster.getShortLeftTimeStr(),
         'showLeftTime': True,
         'isDischarging': True,
         'isInactive': self._isPanelInactive,
         'isEmpty': False,
         'qualityIconSrc': booster.getQualityIcon(),
         'slotLinkage': BOOSTER_CONSTANTS.SLOT_UI}

    def __makeEmptyBoosterVO(self, idx, slotLinkage, icon):
        """Build the view object dict for an empty/placeholder slot."""
        return {'id': str(idx),
         'isInactive': self._isPanelInactive,
         'isEmpty': True,
         'icon': icon,
         'slotLinkage': slotLinkage,
         'showLeftTime': False}

    def __onUpdateGoodies(self, *args):
        # Any goodies change invalidates the whole panel; rebuild it.
        self._buildList()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\boosters\BoostersPanelComponent.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:46:41 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
b906c1f40751b8ff47c98535098a44742864a010 | fd625e2ea155455c96261c8656a51be22fe420c8 | /Python/euler020.py | c7a2ebd25c8bf108eaf07104c5248867c84a68d6 | [
"MIT"
] | permissive | AnuragAnalog/project_euler | 9b84a6aa0061ad4582c8d0059c3c1eaddd844fd2 | 8babbefbd5b7008ad24509f24a9d5f50ba208f45 | refs/heads/master | 2021-12-12T12:07:29.338791 | 2021-11-01T04:26:44 | 2021-11-01T04:26:44 | 210,749,964 | 6 | 16 | MIT | 2021-11-01T04:26:45 | 2019-09-25T03:44:37 | Python | UTF-8 | Python | false | false | 519 | py | #!/usr/bin/python3
"""
n! means n × (n − 1) × ... × 3 × 2 × 1
For example, 10! = 10 × 9 × ... × 3 × 2 × 1 = 3628800,
and the sum of the digits in the number 10! is 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
Find the sum of the digits in the number 100!
"""
def factorial(n: int) -> int:
    """Return n! (the factorial of n).

    The original recursive version had no base case for n == 0 and recursed
    forever (RecursionError); it also hit the recursion limit for large n.
    This iterative version handles n == 0 correctly (0! == 1) and works for
    arbitrarily large n.

    Args:
        n: A non-negative integer.

    Returns:
        The factorial of n.

    Raises:
        ValueError: If n is negative (factorial is undefined there).
    """
    if n < 0:
        raise ValueError("factorial is not defined for negative numbers")
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def euler20() -> int:
    """Return the sum of the decimal digits of 100! (Project Euler 20)."""
    digit_sum = 0
    for digit_char in str(factorial(100)):
        digit_sum += int(digit_char)
    return digit_sum
# Compute and print the answer to Project Euler problem 20.
total = euler20()
print(total)
| [
"anurag.peddi1998@gmail.com"
] | anurag.peddi1998@gmail.com |
07bb350fe43a82cdc4351cf6d1f6b7437852dd81 | 44a8159afa9a26b30c07771fa8a6fbee9b1a1c5d | /src/manage.py | 1965ab128b3dd0545989dd53f099ae54d7cfda76 | [] | no_license | ionescuig/taxi-fares | 27aee078e531555078dd8548ac7035d2b1d20194 | 317b06a2ffb85eb8e488bb04d06579166559b62b | refs/heads/master | 2020-04-06T20:31:39.179112 | 2019-06-11T15:36:32 | 2019-06-11T15:36:32 | 157,760,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before any Django
    # machinery is imported.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fares.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so the original ImportError had some
        # other cause: re-raise it unchanged.
        raise
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| [
"ionescuig@yahoo.com"
] | ionescuig@yahoo.com |
1a02f71bb63f32d51a0da5cc51f580f610b2c315 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /hPaBJ7KJZ8fZtjJgL_0.py | 0c9342e275cdefa6e408278bec1c2b60c1f2d484 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | """
Write a function that takes an integer and returns a string with the given
number of `"a"`s in Edabit.
### Examples
how_many_times(5) ➞ "Edaaaaabit"
how_many_times(0) ➞ "Edbit"
how_many_times(12) ➞ "Edaaaaaaaaaaaabit"
### Notes
* The string must start with "Ed" and end with "bit".
* You'll only be given integers as test input.
"""
def how_many_times(num):
    """Return "Edabit" with *num* letter "a"s between "Ed" and "bit"."""
    return "Ed" + "a" * num + "bit"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5a28d2544c5264e815a06bdcb09331ba41cfba06 | 2ba6775c96a2c17de5949f7c0dc476548447f9fd | /flaskPet/management/__init__.py | da0dd1078b1b2c92a47c511d8f7e659ec848d658 | [] | no_license | reakain/flaskPet | d6f004058b8c92eb354760d8cad39ac3218a8287 | e41f2db382fc18bd0b5f84215df24856c125a237 | refs/heads/master | 2020-03-24T19:24:47.124669 | 2019-02-09T22:21:38 | 2019-02-09T22:21:38 | 142,924,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # -*- coding: utf-8 -*-
"""
flaskpet.management
~~~~~~~~~~~~~~~~~~
This module contains models, forms and views relevant
for managing FlaskPet
:copyright: (c) 2014 by the FlaskPet Team.
:license: BSD, see LICENSE for more details.
"""
import logging
# force plugins to be loaded
from . import plugins
__all__ = ('plugins', )
logger = logging.getLogger(__name__)
| [
"reakain@users.noreply.github.com"
] | reakain@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.