blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c45980c203b1e902e3c8147c7d5bdbfac2138505
|
ad9a7fbc9077990f1a5c984fb3653129d75c42db
|
/code/tests/algorithm/test_stack.py
|
4dae70f23f38793ec0b059036ee0352127104b2c
|
[] |
no_license
|
yvonne96/Algo
|
8b066df365089190dfac98253f39fa4398803e11
|
8c4a4537573a799f5b0e98e49d530322c2e9024b
|
refs/heads/master
| 2020-03-08T04:09:03.260919
| 2018-03-08T23:56:33
| 2018-03-08T23:56:33
| 127,913,000
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
import unittest, sys
from code.algorithms.stack import Stack
class TestStack(unittest.TestCase):
    """Unit tests for the Stack data structure."""

    def test_empty_stack(self):
        """A freshly created stack holds no items."""
        stack = Stack()
        self.assertEqual(stack.items, [])

    def test_standard_input(self):
        """Pushed items keep insertion order: first at the bottom, last on top."""
        stack = Stack()
        for value in (1, 2, 3, 4):
            stack.push(value)
        self.assertEqual(stack.items[0], 1)
        self.assertEqual(stack.items[-1], 4)

    def test_removing_items(self):
        """pop() removes from the top, leaving earlier pushes untouched."""
        stack = Stack()
        for value in (1, 2, 3, 4):
            stack.push(value)
        stack.pop()
        stack.pop()
        self.assertEqual(stack.items[0], 1)
        self.assertEqual(stack.items[-1], 2)


if __name__ == "__main__":
    unittest.main()
|
[
"ciara.godwin3@mail.dcu.ie"
] |
ciara.godwin3@mail.dcu.ie
|
0f65c1e22e00ab4dcd5861542f3f43c535c17d0d
|
6aa59fb47d7b61a28eace0e421d6d898e920f5b6
|
/Polymorphism-Lab/Instruments.py
|
284d38854667774f28d2fc5e6e12dd1b8c6727de
|
[] |
no_license
|
Vigyrious/python_oop
|
1488bf7ffc53059a790a694660a03ebe6814f615
|
8c28e9f8fe6e2af0c0b35b6a691f1af65f0e8579
|
refs/heads/main
| 2023-03-18T10:12:08.768834
| 2021-03-12T20:50:43
| 2021-03-12T20:50:43
| 347,192,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
def play_instrument(instrument):
    """Duck-typed dispatch: any object exposing play() can be played."""
    return instrument.play()


class Guitar:
    """An instrument that announces itself as a guitar."""

    def play(self):
        print("playing the guitar")


guitar = Guitar()
play_instrument(guitar)


class Piano:
    """An instrument that announces itself as a piano."""

    def play(self):
        print("playing the piano")


piano = Piano()
play_instrument(piano)
|
[
"73179295+Vigyrious@users.noreply.github.com"
] |
73179295+Vigyrious@users.noreply.github.com
|
d06f552d7fb63d3aaf78af615b77f1b444d6b19e
|
7cd8ee14711eaf33cee0d9e06e78a974fc579242
|
/PIFramework/juicer/spiders/flipkart_wash.py
|
7dc2733e0b0de68c91287489bcdc8a9283c5bbb7
|
[] |
no_license
|
Chandler-Song/pi
|
c618117dfdd9a7496a57c69f029851e94787f591
|
aebc6d65b79ed43c66e7e1bf16d6d9f31b470372
|
refs/heads/master
| 2022-03-13T02:44:30.452673
| 2019-02-19T09:38:45
| 2019-02-19T09:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
from juicer.utils import *
from juicer.items import *
from selenium import webdriver
import MySQLdb
import time
import scrapy
import json
class FlipkartBestsellersbrowse(JuicerSpider):
name = "flipkart_washingmachine_browse"
start_urls = ['https://www.flipkart.com/washing-machines/pr?sid=j9e,abm,8qx&otracker=categorytree&page=1']
handle_httpstatus_list = [404, 302, 303, 403, 500, 999, 503]
def __init__(self, *args, **kwargs):
super(FlipkartBestsellersbrowse, self).__init__(*args, **kwargs)
self.URL = "https://www.flipkart.com"
def parse(self, response):
sel = Selector(response)
links = sel.xpath('//div[@class="_1UoZlX"]//a//@href').extract()
for i in links :
product_link = 'https://www.flipkart.com' + str(i)
print product_link
sk = product_link.split('&')[0].split('pid=')[-1]
if product_link : self.get_page('flipkart_bestsellers_terminal', product_link, sk)
for i in range(2,40) :
link = "https://www.flipkart.com/washing-machines/pr?sid=j9e,abm,8qx&otracker=categorytree&page=%s"%str(i)
yield Request(link,callback=self.parse_next,dont_filter=True)
def parse_next(self,response):
sel = Selector(response)
links = sel.xpath('//div[@class="_1UoZlX"]//a//@href').extract()
for i in links :
product_link = 'https://www.flipkart.com' + str(i)
print product_link
sk = product_link.split('&')[0].split('pid=')[-1]
if product_link : self.get_page('flipkart_bestsellers_terminal', product_link, sk)
|
[
"aravind@headrun.com"
] |
aravind@headrun.com
|
e6b8029f8d1c75b1be8ceac597f28472e513d647
|
21b5ad37b812ed78799d4efc1649579cc83d32fb
|
/job/migrations/0088_auto_20200623_0947.py
|
bee28fda8e18b5704fa97d9dea77ef1e528e4c9d
|
[] |
no_license
|
SaifulAbir/django-js-api
|
b6f18c319f8109884e71095ad49e08e50485bb25
|
fbf174b9cde2e7d25b4898f511df9c6f96d406cf
|
refs/heads/master
| 2023-02-12T16:09:21.508702
| 2021-01-14T09:05:15
| 2021-01-14T09:05:15
| 329,713,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
# Generated by Django 3.0.3 on 2020-06-23 03:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (job app, #0088).

    Re-declares Job.company as a PROTECT-ed foreign key so a Company
    that still has jobs attached cannot be deleted.
    """

    # Must run after migration 0087 of the same app.
    dependencies = [
        ('job', '0087_auto_20200622_1503'),
    ]

    operations = [
        migrations.AlterField(
            model_name='job',
            name='company',
            # PROTECT: deleting a Company with related jobs raises ProtectedError.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='jobs', to='job.Company'),
        ),
    ]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
b6aa640ae5d1b56f311c966e0424a292e051b6f8
|
cf3549c5200e78dd81095cd3e05b3015d6bc2290
|
/spiderman/misc/mysql_connect.py
|
a68f07b31d86c61ff0ff59e817805eba61b07087
|
[
"Apache-2.0"
] |
permissive
|
zzcv/python
|
e0c56a363188b8a3dcc030b10a7bd4aa1fc426b2
|
69ac0cabb7154816b1df415c0cc32966d6335718
|
refs/heads/master
| 2020-09-14T12:57:08.046356
| 2019-11-18T11:54:54
| 2019-11-18T11:54:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
#/usr/bin/env python
#coding=utf8
"""
# Author: kellanfan
# Created Time : Wed 23 May 2018 08:15:18 PM CST
# File Name: mysql_connect.py
# Description: 关于编码问题可看: https://stackoverflow.com/questions/3942888/unicodeencodeerror-latin-1-codec-cant-encode-character
"""
import pymysql
import yaml
import sys
#本地logger模块
#from logger import Logger
class MysqlConnect(object):
    """Thin wrapper around pymysql driven by a YAML config file.

    The YAML file must define: host, user, password, database.
    See https://stackoverflow.com/questions/3942888 for the original
    encoding discussion that motivated use_unicode/charset below.
    """

    def __init__(self, filename):
        self.__file = filename
        self.__configs = self.__getconfig()
        # self.__mylogger = Logger('mysql_log.yaml').outputLog()
        try:
            self.__host = self.__configs['host']
            self.__user = self.__configs['user']
            self.__password = self.__configs['password']
            self.__database = self.__configs['database']
        except KeyError:
            # A required key is missing from the config file; abort now
            # rather than fail later with a confusing connection error.
            # (Was a bare `except:` that silently swallowed every error.)
            # self.__mylogger.error('missing required parameter in config file')
            sys.exit()

    def __getconfig(self):
        """Load and return the YAML config as a dict."""
        with open(self.__file) as f:
            # safe_load: the config is plain data, never arbitrary Python
            # objects (yaml.load without a Loader is unsafe and deprecated).
            configs = yaml.safe_load(f.read())
        return configs

    def open(self):
        """Open a connection and cursor (unicode-aware, utf8 charset)."""
        # Keyword arguments: positional connect() args were removed in
        # PyMySQL 1.0.
        self.db = pymysql.connect(host=self.__host, user=self.__user,
                                  password=self.__password,
                                  database=self.__database,
                                  use_unicode=True, charset="utf8")
        self.cursor = self.db.cursor()

    def close(self):
        """Close the cursor and the connection."""
        self.cursor.close()
        self.db.close()

    def change_data(self, sql):
        """Execute a mutating statement; return 0 on success or the exception."""
        # NOTE(review): if open() itself fails, self.db may not exist and the
        # rollback/close below will raise AttributeError — consider guarding.
        try:
            self.open()
            self.cursor.execute(sql)
            self.db.commit()
            return 0
        except Exception as e:
            self.db.rollback()
            # self.__mylogger(e)
            return e
        finally:
            self.close()

    def select_data(self, sql):
        """Execute a query; return all rows, or the exception on failure."""
        try:
            self.open()
            self.cursor.execute(sql)
        except Exception as e:
            # self.__mylogger(e)
            return e
        else:
            return self.cursor.fetchall()
        finally:
            self.close()


if __name__ == '__main__':
    a = MysqlConnect('mysql_data.yaml')
    sql = input("the sql: ")
    print(a.select_data(sql))
|
[
"icyfk1989@163.com"
] |
icyfk1989@163.com
|
ca3f4c1da4dc8279a558f6ee7c8303c3a57f9cc6
|
1edd52cf197e5ae67b5939a3beb3e70761334e62
|
/Notes/Notes/Udemy/Aws-automation-with-boto3/Session-9-working-with-IAM/session-refresh/Iam-user-with-console.py
|
0f1f43acc8d7e16bc06aa2ff501db554f5bded2f
|
[] |
no_license
|
sandeepmchary/Devops_wordpress_Notes
|
bdcd85d526780d03c494ecb93e714e7ffe0a4d58
|
ffd2092162073e1e7342c6066d023d04e6ca8c1c
|
refs/heads/master
| 2022-06-18T21:33:02.471025
| 2022-06-12T11:14:47
| 2022-06-12T11:14:47
| 154,679,658
| 1
| 4
| null | 2022-05-19T16:59:57
| 2018-10-25T13:51:40
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
import boto3
from random import choice
import sys
def get_iam_client_object(profile_name="root"):
    """Return an IAM client for the given AWS CLI profile (us-east-2).

    Bug fix: the profile_name argument used to be ignored (the session was
    hard-coded to "root"); it is now honoured, default unchanged.
    """
    session = boto3.session.Session(profile_name=profile_name)
    iam_client = session.client(service_name="iam", region_name="us-east-2")
    return iam_client


def get_random_passwd(passwd_length=8):
    """Return a random password of passwd_length characters (default 8).

    NOTE(review): random.choice is not cryptographically secure; use the
    `secrets` module for real credentials.
    """
    char_for_passwd = "abcedfghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()?<>~`"
    return "".join(choice(char_for_passwd) for _ in range(passwd_length))


def main():
    """Create an IAM user with a console password and admin access."""
    iam_client = get_iam_client_object()
    iam_user_name = input("Enter the Name: ")
    passwd = get_random_passwd()
    policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess"
    try:
        iam_client.create_user(UserName=iam_user_name)
    except Exception as e:
        # Bug fix: AWS reports "EntityAlreadyExists" (capital E); the old
        # comparison against "EntityAlreadyexists" could never match.
        if e.response['Error']['Code'] == "EntityAlreadyExists":
            print("Already Iam User with {} is exists".format(iam_user_name))
            sys.exit(0)
        else:
            print("verify with system admin")
            print(e)
            sys.exit(0)
    iam_client.create_login_profile(UserName=iam_user_name, Password=passwd, PasswordResetRequired=False)
    iam_client.attach_user_policy(UserName=iam_user_name, PolicyArn=policy_arn)
    print("User Name:{}\nUser Password:{}".format(iam_user_name, passwd))
    return None


if __name__ == "__main__":
    main()
|
[
"awssandeepchary@gmail.com"
] |
awssandeepchary@gmail.com
|
44ffeff10786683f3179093b0fa74827dc15a5d8
|
9eac3fbc5cb8a98ccaa4a394e40e955ad8f239b0
|
/parametres/admin.py
|
7141aa98b6e9e1be50310496a4b41221ca7d3662
|
[] |
no_license
|
parheto10/tracability
|
435db9fddcdbf012cfafd6ee3739d90082018038
|
1c989a1219101b35f77ee5bd58b624e43368b55b
|
refs/heads/master
| 2023-02-22T07:33:35.800533
| 2021-01-22T13:45:59
| 2021-01-22T13:45:59
| 329,564,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
from django.contrib import admin
from .models import (
Sous_Prefecture,
Origine,
Prime,
Projet,
Activite,
Region,
Campagne,
Espece,
Cat_Plant,
Projet_Cat,
# Formation,
Pepiniere,
Detail_Pepiniere,
Detail_Retrait
)
class DetailsPepiniereAdmin(admin.TabularInline):
    """Inline editor for Detail_Pepiniere rows."""
    model = Detail_Pepiniere
    extra = 0


class Details_RetraitAdmin(admin.TabularInline):
    """Inline editor for Detail_Retrait rows."""
    model = Detail_Retrait
    extra = 0


class PepiniereAdmin(admin.ModelAdmin):
    """Pepiniere admin with its detail and retrait rows edited inline."""
    inlines = [DetailsPepiniereAdmin, Details_RetraitAdmin]


# Models registered with the default ModelAdmin.
for _model in (
    Activite,
    Campagne,
    Espece,
    Prime,
    Origine,
    Projet,
    Region,
    Sous_Prefecture,
    Cat_Plant,
    Projet_Cat,
):
    admin.site.register(_model)

admin.site.register(Pepiniere, PepiniereAdmin)
|
[
"parheto10@gmail.com"
] |
parheto10@gmail.com
|
ff0c2471925d48342885e8a6a838750e9b1df68c
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/offazure/v20200707/master_site.py
|
e166512e49b960143ea702411c4205817bdd90fa
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 5,549
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['MasterSite']
class MasterSite(pulumi.CustomResource):
    # Generated Pulumi resource for the Azure offazure/v20200707 MasterSite
    # REST resource. Do not hand-edit the logic; regenerate instead.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 e_tag: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['MasterSitePropertiesArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 site_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Site REST Resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] e_tag: eTag for concurrency control.
        :param pulumi.Input[str] location: Azure location in which Sites is created.
        :param pulumi.Input[str] name: Name of the Master site.
        :param pulumi.Input[pulumi.InputType['MasterSitePropertiesArgs']] properties: Nested properties of Master site.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
        :param pulumi.Input[str] site_name: Site name.
        """
        # Legacy __name__/__opts__ keyword arguments are still accepted but
        # deprecated in favour of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: collect the input properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['e_tag'] = e_tag
            __props__['location'] = location
            __props__['name'] = name
            __props__['properties'] = properties
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if site_name is None:
                raise TypeError("Missing required property 'site_name'")
            __props__['site_name'] = site_name
            # Output-only property; populated by the provider.
            __props__['type'] = None
        # Keep the old "latest" type token resolving to this resource version.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:offazure/latest:MasterSite")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(MasterSite, __self__).__init__(
            'azure-nextgen:offazure/v20200707:MasterSite',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'MasterSite':
        """
        Get an existing MasterSite resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # No inputs are pre-populated; the engine reads state from `id`.
        __props__ = dict()

        return MasterSite(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="eTag")
    def e_tag(self) -> pulumi.Output[Optional[str]]:
        """
        eTag for concurrency control.
        """
        return pulumi.get(self, "e_tag")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Azure location in which Sites is created.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        Name of the Master site.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.MasterSitePropertiesResponse']:
        """
        Nested properties of Master site.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of resource. Type = Microsoft.OffAzure/MasterSites.
        """
        return pulumi.get(self, "type")

    # Map provider camelCase property names to the snake_case Python API.
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
4bbdb32dbed101f2c682eb87ec43eb1ae55b552e
|
156a4b52240069ee10df53b39c20102d7368fcd1
|
/L13/shortly/shortly/wsgi.py
|
970004b25eb425d27f458d6a023ef55bf3ffdb74
|
[] |
no_license
|
Serdiuk-Roman/for_lit
|
0d7072b0d5da336be5bfb9c6370c1673a62e4574
|
80dc5a5bd8b8258a88b5801073296e034ce04d5a
|
refs/heads/master
| 2022-12-12T14:54:14.591181
| 2019-08-11T08:08:19
| 2019-08-11T08:08:19
| 126,608,657
| 0
| 0
| null | 2022-12-08T02:18:39
| 2018-03-24T14:42:45
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for shortly project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shortly.settings")

# Module-level WSGI callable that servers (gunicorn, uWSGI, ...) import.
application = get_wsgi_application()
|
[
"serdiuk.r@gmail.com"
] |
serdiuk.r@gmail.com
|
289e6c858918567ab765985e9961d24038ccf7bc
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R3/benchmark/startQiskit567.py
|
2b27d4b62ab6147e30a8d70f873a234565f322af
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,528
|
py
|
# qubit number=3
# total number=13
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'


def make_circuit(n: int) -> QuantumCircuit:
    """Build the generated benchmark circuit on an n-qubit register.

    NOTE(review): reads the module-level globals E, gamma, beta and V
    (assigned in the __main__ block below) and indexes input_qubit[3],
    so it must be called with n >= 4 after those globals exist.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=5

    # Cost layer: one controlled-phase per graph edge (edges are 1-indexed
    # into the register via k-1/l-1), plus single-qubit phases.
    for edge in E:
        k = edge[0]
        l = edge[1]
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)

    # Mixer layer: X-rotation on every qubit.
    prog.rx(2 * beta, range(len(V)))

    # Remaining gates are the generated mutation sequence (redundant pairs
    # such as the double swap/x/y are intentional in this benchmark).
    prog.swap(input_qubit[1],input_qubit[0]) # number=6
    prog.swap(input_qubit[1],input_qubit[0]) # number=7
    prog.x(input_qubit[0]) # number=8
    prog.y(input_qubit[3]) # number=10
    prog.x(input_qubit[0]) # number=9
    prog.y(input_qubit[0]) # number=11
    prog.y(input_qubit[0]) # number=12
    # circuit end

    return prog


if __name__ == '__main__':
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    # Build the weighted graph over V/E.
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Sweep (gamma, beta) over [0, pi) on a grid and evaluate F1.
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)

    # Pick the grid point that maximises F1 and convert indices to angles.
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size

    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit567.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('qasm_simulator')

    # Transpile for a fake 5-qubit device, then measure every qubit.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()

    # Dump counts, depth and the circuit itself to the CSV output file.
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
a5817120be6c64eee97cb0929f8c7231d1ade532
|
6b7a2b5414f4a3f9ed116fa73a2ae5c732957ed4
|
/items/views.py
|
a7fbcfbabfad1c1ca64f3c084710a71e7dc8a912
|
[] |
no_license
|
sankha555/bestro
|
366e02838775484940cb224800ac07f0a9cbd3d3
|
7e26909fe2c9722a005630cde24e9d6433463ba3
|
refs/heads/main
| 2023-01-08T09:06:15.999259
| 2020-10-31T10:51:16
| 2020-10-31T10:51:16
| 308,825,633
| 0
| 0
| null | 2020-10-31T08:47:10
| 2020-10-31T07:11:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib import messages
from items.models import Item, Combo
from items.forms import ItemForm, ComboForm
@staff_member_required
def create_item(request):
    """Staff-only view: add a new Item (with uploaded image) to the menu."""
    if request.method == "POST":
        # request.FILES carries the item's image upload.
        form = ItemForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            # (Was an f-string with no placeholders — plain literal suffices.)
            messages.success(request, 'Item Added to Menu!')
            return redirect('menu')
    else:
        form = ItemForm()
    context = {
        'form': form
    }
    return render(request, 'items/create_item.htm', context)
@staff_member_required
def update_item(request, pk):
    """Staff-only view: edit an existing Item."""
    item = get_object_or_404(Item, pk=pk)
    if request.method == "POST":
        # Bug fix: pass request.FILES so image re-uploads are saved on
        # update, matching create_item which already does so.
        form = ItemForm(request.POST, request.FILES, instance=item)
        if form.is_valid():
            form.save()
            # (Was an f-string with no placeholders — plain literal suffices.)
            messages.success(request, 'Item Updated!')
            return redirect('menu')
    else:
        form = ItemForm(instance=item)
    context = {
        'form': form,
        'item': item
    }
    return render(request, 'items/update_item.htm', context)
@staff_member_required
def create_combo(request):
    """Staff-only view: add a new Combo."""
    if request.method != "POST":
        form = ComboForm()
    else:
        form = ComboForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('create_combo')
    return render(request, 'combos/create_combo.htm', {'form': form})
@staff_member_required
def update_combo(request, pk):
    """Staff-only view: edit an existing Combo."""
    combo = get_object_or_404(Combo, pk=pk)
    if request.method != "POST":
        form = ComboForm(instance=combo)
    else:
        form = ComboForm(request.POST, instance=combo)
        if form.is_valid():
            form.save()
            return redirect('create_combo')
    # NOTE(review): renders 'items/create_combo.htm' while create_combo uses
    # 'combos/create_combo.htm' — confirm which template path is intended.
    return render(request, 'items/create_combo.htm', {'form': form, 'combo': combo})
|
[
"f20190029@pilani.bits-pilani.ac.in"
] |
f20190029@pilani.bits-pilani.ac.in
|
531011d5c9305e5f6faed201af1fcb85dd90e145
|
acd41dc7e684eb2e58b6bef2b3e86950b8064945
|
/res/packages/scripts/scripts/client/FX/Events/AlignModel.py
|
b24c7f10e3f9418dbdf4aeea2f0efb6d859aa723
|
[] |
no_license
|
webiumsk/WoT-0.9.18.0
|
e07acd08b33bfe7c73c910f5cb2a054a58a9beea
|
89979c1ad547f1a1bbb2189f5ee3b10685e9a216
|
refs/heads/master
| 2021-01-20T09:37:10.323406
| 2017-05-04T13:51:43
| 2017-05-04T13:51:43
| 90,268,530
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,280
|
py
|
# 2017.05.04 15:20:53 Střední Evropa (letní čas)
# Embedded file name: scripts/client/FX/Events/AlignModel.py
from FX.Event import Event
from FX import s_sectionProcessors
from bwdebug import *
class AlignModel(Event):
    """
    This class implements an Event that sets the basis vectors for a PyModel.
    """

    def go(self, effect, actor, source, target, **kargs):
        """
        This method initiates the AlignModel event. It requires a "Basis"
        parameter to be passed into the variable arguments dictionary, which
        is a tuple of (dir,pos). Returns 0.0 (no event duration).
        """
        try:
            # Python 2 code (dict.has_key); 'ModelAlignment' wins over 'Basis'.
            if kargs.has_key('ModelAlignment'):
                dir, pos = kargs['ModelAlignment']
            elif kargs.has_key('Basis'):
                dir, pos = kargs['Basis']
            actor.position = pos
            # NOTE(review): `math` is not imported in this module; presumably
            # it arrives via one of the star imports above — verify.
            actor.yaw = math.atan2(dir.x, dir.z)
        except:
            # Also triggered when neither key is present: pos/dir stay
            # unbound and raise NameError, which the bare except swallows.
            WARNING_MSG('No basis was passed into the argument list', self, actor, source, target, kargs)

        return 0.0


# Register this event type with the FX section-processor table.
s_sectionProcessors['AlignModel'] = AlignModel
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\FX\Events\AlignModel.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:20:53 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a9090ceb80a1627f5e03947dfe4312844ebe0d95
|
25e481ef7fba79285f4c8a7fa2e81c8b2b7f9cce
|
/saleor/search/documents.py
|
21caff48c91dad01e664334c00d2f0a664a31fe8
|
[
"BSD-2-Clause"
] |
permissive
|
arslanahmd/Ghar-Tameer
|
59e60def48a14f9452dfefe2edf30e362878191d
|
72401b2fc0079e6d52e844afd8fcf57122ad319f
|
refs/heads/master
| 2023-01-31T04:08:26.288332
| 2018-06-07T18:02:01
| 2018-06-07T18:02:01
| 136,231,127
| 0
| 0
|
NOASSERTION
| 2023-01-11T22:21:42
| 2018-06-05T20:28:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,008
|
py
|
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch_dsl import analyzer, token_filter
from ..order.models import Order
from ..product.models import Product
from ..userprofile.models import User
storefront = Index('storefront')
# Single-node settings: one shard, no replicas.
storefront.settings(number_of_shards=1, number_of_replicas=0)

# Edge-ngram filter so partial words (3-15 chars) match product titles.
partial_words = token_filter(
    'partial_words', 'edge_ngram', min_gram=3, max_gram=15)
title_analyzer = analyzer(
    'title_analyzer',
    tokenizer='standard',
    filter=[partial_words, 'lowercase'])
# uax_url_email keeps email addresses and URLs as single tokens.
email_analyzer = analyzer('email_analyzer', tokenizer='uax_url_email')


@storefront.doc_type
class ProductDocument(DocType):
    # Searchable title field, analyzed for partial-word matching.
    title = fields.StringField(analyzer=title_analyzer)

    def prepare_title(self, instance):
        # The indexed "title" is the product's name.
        return instance.name

    class Meta:
        model = Product
        fields = ['name', 'description', 'is_published']


users = Index('users')
users.settings(number_of_shards=1, number_of_replicas=0)


@users.doc_type
class UserDocument(DocType):
    # Indexed as the user's email (see prepare_user below).
    user = fields.StringField(analyzer=email_analyzer)
    first_name = fields.StringField()
    last_name = fields.StringField()

    def prepare_user(self, instance):
        return instance.email

    def prepare_first_name(self, instance):
        # Names are taken from the default billing address, when one exists.
        address = instance.default_billing_address
        if address:
            return address.first_name

    def prepare_last_name(self, instance):
        address = instance.default_billing_address
        if address:
            return address.last_name

    class Meta:
        model = User
        fields = ['email']


orders = Index('orders')
orders.settings(number_of_shards=1, number_of_replicas=0)


@orders.doc_type
class OrderDocument(DocType):
    user = fields.StringField(analyzer=email_analyzer)

    def prepare_user(self, instance):
        # Prefer the account email; fall back to the guest-checkout email.
        if instance.user:
            return instance.user.email
        else:
            return instance.user_email

    class Meta:
        model = Order
        fields = ['user_email', 'discount_name']
|
[
"arslanahmad085@gmail.com"
] |
arslanahmad085@gmail.com
|
962214992716034bac30b38a80e43aa1a2df3de9
|
6c5d8700eb80a647a86d583d16cab5ec5b2d0bc0
|
/shop/models.py
|
f2ce8d7cd2e7d1181d2988cd7fdbe347fc88502c
|
[] |
no_license
|
askdjango/offline-201707-weekend-afternoon
|
f83a324bca2e16f423e009a7d6b033db435114e2
|
469b84d84f8c2c47c6d4a45bab5dfe2cd84adcf7
|
refs/heads/master
| 2020-12-02T07:58:10.831108
| 2017-07-30T07:41:36
| 2017-07-30T07:41:36
| 96,754,198
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
from django.db import models
from django.urls import reverse
class Item(models.Model):
    """A shop item with a name, price and description."""

    name = models.CharField(max_length=100)
    price = models.PositiveIntegerField()
    desc = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save

    def get_absolute_url(self):
        """Return the canonical URL of this item's detail page."""
        return reverse('shop:item_detail', args=[self.id])
|
[
"allieuslee@gmail.com"
] |
allieuslee@gmail.com
|
f0ef8b3b822755e643558dfbaf8038848844a94e
|
c954904d3a3259f0bee4bc3942998c30f4714e68
|
/shortener/shorturl/admin.py
|
d2d7a1f500ccdca743dd6b6677e8ab573e9ced69
|
[] |
no_license
|
Alodhaib/django-shortener-example
|
9443e51191086fa1321468eb3fdefa137c25e330
|
d037c913ed18e0a7b24865b7f4f5aaf68df2cca3
|
refs/heads/master
| 2021-01-24T10:06:40.965556
| 2013-05-11T16:01:13
| 2013-05-11T16:01:13
| 69,673,280
| 0
| 0
| null | 2016-09-30T14:22:22
| 2016-09-30T14:22:22
| null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from shorturl.models import Link
class LinkAdmin(admin.ModelAdmin):
    """Admin options for shortened links (all defaults)."""


admin.site.register(Link, LinkAdmin)
|
[
"allisson@gmail.com"
] |
allisson@gmail.com
|
26bdb9144ab0c29daebafabd909699cec109f600
|
3449e5511dc8da19fc841af767dbe8d216e26ffb
|
/mmServer/shared/migrations/0001_initial.py
|
b519290b8cc95fbc7f447738179c3c140f0d7dc6
|
[] |
no_license
|
erikwestra/mm-server
|
8ba2af0ee7acd372949589b6f8d429099a38ea58
|
bead1ad439541211e33fdc60264a869f18a99ae9
|
refs/heads/master
| 2021-01-10T21:14:23.636707
| 2015-05-27T21:22:54
| 2015-05-27T21:22:54
| 28,573,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,434
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the shared.Profile table."""

    def forwards(self, orm):
        # Adding model 'Profile'
        db.create_table(u'shared_profile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('global_id', self.gf('django.db.models.fields.TextField')(unique=True, db_index=True)),
            ('name', self.gf('django.db.models.fields.TextField')()),
            ('location', self.gf('django.db.models.fields.TextField')()),
            ('image_url', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'shared', ['Profile'])

    def backwards(self, orm):
        # Deleting model 'Profile'
        db.delete_table(u'shared_profile')

    # Frozen ORM snapshot South uses while running this migration.
    models = {
        u'shared.profile': {
            'Meta': {'object_name': 'Profile'},
            'global_id': ('django.db.models.fields.TextField', [], {'unique': 'True', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_url': ('django.db.models.fields.TextField', [], {}),
            'location': ('django.db.models.fields.TextField', [], {}),
            'name': ('django.db.models.fields.TextField', [], {})
        }
    }

    complete_apps = ['shared']
|
[
"ewestra@gmail.com"
] |
ewestra@gmail.com
|
90fd9dde7cdf83348a25711a71c5e94d066e59ac
|
3db1063777e6a0b2e7dba70fc507fe5e88b89fd9
|
/tests/test_sklearn_double_tensor_type_reg.py
|
fe4fd01080be20e8f2cd3600dd014d04785a6a7c
|
[
"Apache-2.0"
] |
permissive
|
Pandinosaurus/sklearn-onnx
|
6fe8266576a63dfc97782b001fd5a7b1a8c4c076
|
e85674a67a0a043e19c2ffe181e5d31eca8ce40b
|
refs/heads/master
| 2022-03-15T12:33:57.138828
| 2022-02-25T14:31:04
| 2022-02-25T14:31:04
| 199,595,952
| 0
| 0
|
Apache-2.0
| 2022-02-25T20:54:57
| 2019-07-30T07:11:05
|
Python
|
UTF-8
|
Python
| false
| false
| 6,794
|
py
|
# SPDX-License-Identifier: Apache-2.0
"""Tests GLMRegressor converter."""
import unittest
from distutils.version import StrictVersion
import numpy as np
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from sklearn.ensemble import BaggingRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
try:
from sklearn.ensemble import VotingRegressor
except ImportError:
# New in 0.21
VotingRegressor = None
from skl2onnx import convert_sklearn, to_onnx
from skl2onnx.common.data_types import DoubleTensorType
from onnxruntime import __version__ as ort_version
from test_utils import (
dump_data_and_model, fit_regression_model, TARGET_OPSET)
warnings_to_skip = (DeprecationWarning, FutureWarning, ConvergenceWarning)
class TestSklearnDoubleTensorTypeRegressor(unittest.TestCase):
    """Checks regressors converted with float64 (DoubleTensorType) inputs.

    Every test is gated on the onnxruntime version because the
    double-precision kernels (Relu/Tanh/Sigmoid, ReduceMean, TopK, Sum, ...)
    were added to the runtime progressively.
    """
    @unittest.skipIf(
        StrictVersion(ort_version) <= StrictVersion("1.2.0"),
        reason="onnxruntime misses implementation for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_model_linear_regression_64(self):
        model, X = fit_regression_model(LinearRegression())
        model_onnx = convert_sklearn(
            model, "linear regression",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET)
        # elem_type 11 is TensorProto.DOUBLE: the graph really kept float64.
        self.assertIn("elem_type: 11", str(model_onnx))
        dump_data_and_model(
            X.astype(np.float64), model, model_onnx,
            basename="SklearnLinearRegressionDouble")
    @unittest.skipIf(
        StrictVersion(ort_version) < StrictVersion("1.7.0"),
        reason="onnxruntime misses implementation for "
               "Relu, Tanh, Sigmoid for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_model_mlpregressor_64(self):
        # Could not find an implementation for the node Relu:Relu(6)
        # Could not find an implementation for the node Tanh:Tanh(6)
        # Could not find an implementation for the node Sigmoid:Sigmoid(6)
        for activation in ['relu', 'tanh', 'logistic']:
            with self.subTest(activation=activation):
                model, X = fit_regression_model(
                    MLPRegressor(activation=activation))
                model_onnx = convert_sklearn(
                    model, "linear regression",
                    [("input", DoubleTensorType([None, X.shape[1]]))],
                    target_opset=TARGET_OPSET)
                self.assertIn("elem_type: 11", str(model_onnx))
                dump_data_and_model(
                    X.astype(np.float64), model, model_onnx,
                    basename="SklearnMLPRegressorDouble%s" % activation)
    @unittest.skipIf(
        StrictVersion(ort_version) < StrictVersion("1.7.0"),
        reason="onnxruntime misses implementation for "
               "ReduceMean for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_bagging_regressor_sgd_64(self):
        # Could not find an implementation for
        # the node ReduceMean:ReduceMean(11)
        model, X = fit_regression_model(
            BaggingRegressor(SGDRegressor()))
        model_onnx = convert_sklearn(
            model, "bagging regressor",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET)
        dump_data_and_model(
            X.astype(np.float64), model, model_onnx,
            basename="SklearnBaggingRegressorSGDDouble")
    @unittest.skipIf(
        StrictVersion(ort_version) <= StrictVersion("1.2.0"),
        reason="onnxruntime misses implementation for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_model_sgd_regressor_64(self):
        model, X = fit_regression_model(SGDRegressor())
        model_onnx = convert_sklearn(
            model, "linear regression",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET)
        self.assertIn("elem_type: 11", str(model_onnx))
        dump_data_and_model(
            X.astype(np.float64), model, model_onnx,
            basename="SklearnLinearSGDRegressorDouble")
    @unittest.skipIf(
        StrictVersion(ort_version) < StrictVersion("1.7.0"),
        reason="shape_inference fails")
    @ignore_warnings(category=warnings_to_skip)
    def test_gpr_rbf_fitted_true_double(self):
        gp = GaussianProcessRegressor(
            alpha=1e-7, n_restarts_optimizer=15, normalize_y=True)
        gp, X = fit_regression_model(gp)
        model_onnx = to_onnx(
            gp, initial_types=[('X', DoubleTensorType([None, None]))],
            target_opset=TARGET_OPSET)
        dump_data_and_model(
            X.astype(np.float64), gp, model_onnx, verbose=False,
            basename="SklearnGaussianProcessRBFTDouble")
    @unittest.skipIf(
        StrictVersion(ort_version) < StrictVersion("1.7.0"),
        reason="onnxruntime misses implementation for "
               "TopK for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_model_knn_regressor_double(self):
        # Could not find an implementation for the node To_TopK:TopK(11)
        model, X = fit_regression_model(KNeighborsRegressor(n_neighbors=2))
        model_onnx = convert_sklearn(
            model, "KNN regressor",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET,
            options={id(model): {'optim': 'cdist'}})
        dump_data_and_model(
            X.astype(np.float64)[:7],
            model, model_onnx,
            basename="SklearnKNeighborsRegressorDouble")
    @unittest.skipIf(VotingRegressor is None, reason="new in 0.21")
    @unittest.skipIf(
        StrictVersion(ort_version) < StrictVersion("1.7.0"),
        reason="onnxruntime misses implementation for "
               "Sum for double")
    @ignore_warnings(category=warnings_to_skip)
    def test_model_voting_regression(self):
        # Could not find an implementation for the node Sum:Sum(8)
        model = VotingRegressor([
            ('lr', LinearRegression()),
            ('dt', SGDRegressor())])
        model, X = fit_regression_model(model)
        model_onnx = convert_sklearn(
            model, "voting regression",
            [("input", DoubleTensorType([None, X.shape[1]]))],
            target_opset=TARGET_OPSET)
        dump_data_and_model(
            X.astype(np.float64), model, model_onnx,
            basename="SklearnVotingRegressorDouble",
            comparable_outputs=[0])
if __name__ == "__main__":
    unittest.main()
|
[
"noreply@github.com"
] |
Pandinosaurus.noreply@github.com
|
dacc3371d34d1638e3dab27125026ac0234c1f31
|
9093d43a4fc00f0a89fde240caa9ea54e3b22a24
|
/step2_random_take_data.py
|
a5f6def0e1078cfce75cf09f9f94738361a98fe2
|
[] |
no_license
|
hankerkuo/HogwartsHouses
|
a1db9d4f9aff99003ef438d7656f91d95fc520d6
|
85c517a9d690e94c58d2c9c3f8ff0ba09a975394
|
refs/heads/master
| 2022-10-09T00:37:56.938523
| 2022-09-29T17:51:31
| 2022-09-29T17:51:31
| 132,591,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
import os
import os.path as path
import numpy as np
import shutil
# randomly take training data and testing data, and then put them in the folder of this project!
# mother_folder must contain all of the class folders
# train_ratio argument is the ratio of training data, its value must between [0, 1]
def random_take_data(mother_folder, train_ratio, save_path):
    """Randomly split the images of every class folder under
    *mother_folder* into training and testing sets.

    mother_folder must contain one sub-folder per class; train_ratio is
    the fraction of files (per class) that go into the training set and
    must lie in [0, 1]. Copies land in save_path/train_data and
    save_path/test_data.
    """
    train_dir = path.join(save_path, 'train_data')
    test_dir = path.join(save_path, 'test_data')
    if not path.exists(train_dir):
        os.makedirs(train_dir)
        print('successfully creat folder : train_data at', train_dir)
    if not path.exists(test_dir):
        os.makedirs(test_dir)
        print('successfully creat folder : test_data at', test_dir)
    print('Processing data ...')
    for class_dir in os.listdir(mother_folder):  # one iteration per class
        files = os.listdir(path.join(mother_folder, class_dir))
        n_train = np.int32(np.floor(len(files) * train_ratio))
        # Sample the training files without replacement; the complement
        # becomes the test set.
        train_files = np.random.choice(files, n_train, replace=False)
        test_files = np.setdiff1d(files, train_files)
        for name in train_files:
            shutil.copy(path.join(mother_folder, class_dir, name), train_dir)
        for name in test_files:
            shutil.copy(path.join(mother_folder, class_dir, name), test_dir)
    print('Process done')
# sample code of using this lib
'''
folder_of_resized_picture = 'C:/data/HogwartsHouses/Final_data32by32'
save_path = 'C:/data/HogwartsHouses/dataset_32by32'
random_take_data(folder_of_resized_picture, 0.8, save_path)
'''
|
[
"b00504059@ntu.edu.tw"
] |
b00504059@ntu.edu.tw
|
20e70870e41c5a0914c6ca7f9a4502a0e60cfd96
|
1cb97b0fe8b275efd540716cb6e742fc44e927bf
|
/rljax/algorithm/tqc.py
|
0cab46908744c082e44a614483e84981deda1786
|
[
"MIT"
] |
permissive
|
khushjammu/rljax
|
31e4d0f9c6aa57a0a07a35f7f8854cc78360ae5a
|
f2d5e81240d99187fcb625d2caa630c3c7deecfc
|
refs/heads/master
| 2023-06-27T17:15:43.437065
| 2021-07-30T16:55:47
| 2021-07-30T16:55:47
| 391,125,669
| 0
| 0
|
MIT
| 2021-07-30T16:18:23
| 2021-07-30T16:18:22
| null |
UTF-8
|
Python
| false
| false
| 4,258
|
py
|
from functools import partial
from typing import List
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from rljax.algorithm.sac import SAC
from rljax.network import ContinuousQuantileFunction, StateDependentGaussianPolicy
from rljax.util import quantile_loss
class TQC(SAC):
    """Truncated Quantile Critics on top of SAC.

    Each of the ``num_critics`` critics predicts ``num_quantiles`` return
    quantiles; when building the target, the largest
    ``num_quantiles_to_drop`` quantiles per critic are discarded
    (truncation) to reduce overestimation.
    """
    name = "TQC"
    def __init__(
        self,
        num_agent_steps,
        state_space,
        action_space,
        seed,
        max_grad_norm=None,
        gamma=0.99,
        nstep=1,
        num_critics=5,
        buffer_size=10 ** 6,
        use_per=False,
        batch_size=256,
        start_steps=10000,
        update_interval=1,
        tau=5e-3,
        fn_actor=None,
        fn_critic=None,
        lr_actor=3e-4,
        lr_critic=3e-4,
        lr_alpha=3e-4,
        units_actor=(256, 256),
        units_critic=(512, 512, 512),
        log_std_min=-20.0,
        log_std_max=2.0,
        d2rl=False,
        num_quantiles=25,
        num_quantiles_to_drop=0,
    ):
        if d2rl:
            self.name += "-D2RL"
        # Default critic: quantile network shared by all critics.
        if fn_critic is None:
            def fn_critic(s, a):
                return ContinuousQuantileFunction(
                    num_critics=num_critics,
                    hidden_units=units_critic,
                    num_quantiles=num_quantiles,
                    d2rl=d2rl,
                )(s, a)
        # Default actor: squashed Gaussian policy, as in SAC.
        if fn_actor is None:
            def fn_actor(s):
                return StateDependentGaussianPolicy(
                    action_space=action_space,
                    hidden_units=units_actor,
                    log_std_min=log_std_min,
                    log_std_max=log_std_max,
                    d2rl=d2rl,
                )(s)
        super(TQC, self).__init__(
            num_agent_steps=num_agent_steps,
            state_space=state_space,
            action_space=action_space,
            seed=seed,
            max_grad_norm=max_grad_norm,
            gamma=gamma,
            nstep=nstep,
            num_critics=num_critics,
            buffer_size=buffer_size,
            use_per=use_per,
            batch_size=batch_size,
            start_steps=start_steps,
            update_interval=update_interval,
            tau=tau,
            fn_actor=fn_actor,
            fn_critic=fn_critic,
            lr_actor=lr_actor,
            lr_critic=lr_critic,
            lr_alpha=lr_alpha,
        )
        # Quantile midpoints (i + 0.5) / N, shape (1, num_quantiles).
        self.cum_p_prime = jnp.expand_dims((jnp.arange(0, num_quantiles, dtype=jnp.float32) + 0.5) / num_quantiles, 0)
        self.num_quantiles = num_quantiles
        # Total number of target quantiles kept after dropping the top
        # num_quantiles_to_drop per critic.
        self.num_quantiles_target = (num_quantiles - num_quantiles_to_drop) * num_critics
    @partial(jax.jit, static_argnums=0)
    def _calculate_value(
        self,
        params_critic: hk.Params,
        state: np.ndarray,
        action: np.ndarray,
    ) -> jnp.ndarray:
        # Concatenate every critic's quantile predictions along axis 1.
        return jnp.concatenate(self._calculate_value_list(params_critic, state, action), axis=1)
    @partial(jax.jit, static_argnums=0)
    def _calculate_target(
        self,
        params_critic_target: hk.Params,
        log_alpha: jnp.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        next_state: np.ndarray,
        next_action: jnp.ndarray,
        next_log_pi: jnp.ndarray,
    ) -> jnp.ndarray:
        next_quantile = self._calculate_value(params_critic_target, next_state, next_action)
        # Truncation: keep only the smallest num_quantiles_target quantiles.
        next_quantile = jnp.sort(next_quantile)[:, : self.num_quantiles_target]
        # Entropy bonus, as in SAC's soft value.
        next_quantile -= jnp.exp(log_alpha) * self._calculate_log_pi(next_action, next_log_pi)
        return jax.lax.stop_gradient(reward + (1.0 - done) * self.discount * next_quantile)
    @partial(jax.jit, static_argnums=0)
    def _calculate_loss_critic_and_abs_td(
        self,
        quantile_list: List[jnp.ndarray],
        target: jnp.ndarray,
        weight: np.ndarray,
    ) -> jnp.ndarray:
        # Huber quantile regression loss summed over critics, then averaged.
        loss_critic = 0.0
        for quantile in quantile_list:
            loss_critic += quantile_loss(target[:, None, :] - quantile[:, :, None], self.cum_p_prime, weight, "huber")
        loss_critic /= self.num_critics * self.num_quantiles
        # Mean absolute TD error of the first critic, used for PER priorities.
        abs_td = jnp.abs(target[:, None, :] - quantile_list[0][:, :, None]).mean(axis=1).mean(axis=1, keepdims=True)
        return loss_critic, jax.lax.stop_gradient(abs_td)
|
[
"kuboy2482@gmail.com"
] |
kuboy2482@gmail.com
|
a4e868db0f547b24acea0ad887afdfb7e41f16f6
|
8660906ee809f572ec766db192f4b511e15fe55a
|
/pythonProject/functions 2.py
|
4ddcd441b21557aae6a7f1266e28ab5f992efe99
|
[] |
no_license
|
mageshrocky/PycharmProjects
|
a731acc47d5108c9129787ac3e4c5385f25e099c
|
17c12da7d91aec7818f5a76bfff0aae5275aa232
|
refs/heads/master
| 2023-05-30T13:21:01.048013
| 2021-06-15T10:22:49
| 2021-06-15T10:22:49
| 377,121,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
def welcome():
    # One-time greeting shown before the list is displayed.
    print('welcome to my bucket_list')
def bucket_list():
    # For each food item: print it, then ask interactively whether it
    # has been eaten, echoing the answer back.
    welcome()
    x = 'biriyani','fried rice','grill','shawarma'
    for i in x:
        print(i)
        ask = input('eaten or not:')
        # Any answer other than 'eaten'/'not' is silently ignored.
        if ask == 'eaten':
            print(f'{i} is eaten')
        elif ask == 'not':
            print(f'{i} is not eaten')
bucket_list()
|
[
"magesh1699@gmail.com"
] |
magesh1699@gmail.com
|
87f547feea934df8f3f0d1a245a7f6cb4d4a3a29
|
7d949b9f19e4c5c897b3aef76e604f2c0eee7112
|
/src-python/saccade_analysis/analysis201009/master_plot_vars.py
|
89549b136017cbf3d778e4b531923c386e78b806
|
[] |
no_license
|
AndreaCensi/saccade_analysis
|
d3fad3a1a406b97c4dcf9cdc82b9b2ce1fbf42df
|
71b87e9225b16317ffa9a581b3c62d8343fe7bfa
|
refs/heads/master
| 2016-09-11T06:49:22.254391
| 2011-12-20T06:39:30
| 2011-12-20T06:39:30
| 952,465
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
'''
Definition of variable of interests for plotting.
'''
class Variable():
    """Descriptor for one saccade statistic to be plotted.

    Bundles the axis label, unit, "interesting" value range, and the
    histogram/density settings used by the plotting code. ``field``
    defaults to ``id`` when not given explicitly.
    """
    def __init__(self, id, letter, name, interesting,
                 unit, density_max_y, density_bins, include, mod=False,
                 field=None, percentiles=True):
        self.id = id
        self.letter = letter
        self.name = name
        self.interesting = interesting
        self.unit = unit
        self.density_max_y = density_max_y
        self.density_bins = density_bins
        self.include = include
        self.mod = mod
        # Fall back to the variable id as the data-field name.
        self.field = id if field is None else field
        self.percentiles = percentiles
# Registry of the saccade variables the plotting scripts iterate over.
# Orientation variables are circular (mod=True) and excluded by default.
variables = []
variables.append(Variable(
    id='amplitude',
    letter='A',
    interesting=[1, 200],
    name='Amplitude',
    unit='deg',
    density_max_y=0.06,
    density_bins=100,
    include=True
))
variables.append(Variable(
    id='duration',
    letter='D',
    interesting=[0.01, 0.9],
    name='Duration',
    unit='s',
    density_max_y=15,
    density_bins=50,
    include=True
))
variables.append(Variable(
    id='top_velocity',
    letter='V',
    interesting=[10, 4000], # 2000 enough for tether
    name='Top angular velocity',
    unit='deg/s',
    density_max_y=3 * 1e-3,
    density_bins=100,
    include=True
))
variables.append(Variable(
    id='interval',
    field='time_passed',
    letter='I',
    interesting=[0.01, 8],
    name='Interval',
    unit='s',
    density_max_y=2,
    density_bins=100,
    include=True
))
variables.append(Variable(
    id='initial_orientation',
    field='orientation_start',
    letter='io',
    interesting=[0, 360],
    name='Initial orientation',
    unit='deg',
    density_max_y=None,
    density_bins=90,
    include=False,
    mod=True,
    percentiles=False
))
variables.append(Variable(
    id='final_orientation',
    field='orientation_stop',
    letter='io',
    interesting=[0, 360],
    name='Final orientation',
    unit='deg',
    density_max_y=None,
    density_bins=90,
    include=False,
    mod=True,
    percentiles=False
))
|
[
"andrea@cds.caltech.edu"
] |
andrea@cds.caltech.edu
|
32bf63d6e8e9e3be59ddbce2a9dbab8b1419cd83
|
55ceefc747e19cdf853e329dba06723a44a42623
|
/_CodeTopics/LeetCode/401-600/000497/RE--000497.py3
|
97b99260625d25bf3404827bdfbeaa178d69aef9
|
[] |
no_license
|
BIAOXYZ/variousCodes
|
6c04f3e257dbf87cbe73c98c72aaa384fc033690
|
ee59b82125f100970c842d5e1245287c484d6649
|
refs/heads/master
| 2023-09-04T10:01:31.998311
| 2023-08-26T19:44:39
| 2023-08-26T19:44:39
| 152,967,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,957
|
py3
|
class Solution:
    """LeetCode 497 — Random Point in Non-overlapping Rectangles.

    Rejection sampling: store each rectangle's x/y ranges plus the
    bounding box of all rectangles, sample uniformly from the bounding
    box and retry until the point lies inside some rectangle.
    """

    def __init__(self, rects: List[List[int]]):
        self.legalX = []
        self.legalY = []
        # Bounding box over all rectangles; coordinates are bounded by 1e9,
        # so these sentinels are strictly outside the valid range.
        self.minX = 10**9 + 1
        self.maxX = -10**9 - 1
        self.minY = 10**9 + 1
        self.maxY = -10**9 - 1
        for a, b, x, y in rects:
            self.legalX.append([a, x])
            self.legalY.append([b, y])
            self.minX = min(self.minX, a)
            self.maxX = max(self.maxX, x)
            self.minY = min(self.minY, b)
            self.maxY = max(self.maxY, y)

    def pick(self) -> List[int]:
        # Sample from the bounding box until the point lands in a rectangle.
        while True:
            x = random.randint(self.minX, self.maxX)
            # BUG FIX: y must be drawn from [minY, maxY]; the original used
            # self.minX as the lower bound, which raises
            # "ValueError: empty range for randrange()" whenever minX > maxY
            # (see the recorded failing test case below).
            y = random.randint(self.minY, self.maxY)
            if self.is_in_rects(x, y):
                return [x, y]

    def is_in_rects(self, coorX, coorY):
        # Linear scan; points on a rectangle border count as inside.
        for i in range(len(self.legalX)):
            a, x = self.legalX[i]
            b, y = self.legalY[i]
            if a <= coorX <= x and b <= coorY <= y:
                return True
        return False
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
"""
https://leetcode.cn/submissions/detail/323265706/
2 / 35 个通过测试用例
状态:执行出错
执行出错信息:
ValueError: empty range for randrange() (35330199, -46856949, -82187148)
raise ValueError("empty range for randrange() (%d, %d, %d)" % (istart, istop, width))
Line 353 in randrange (/usr/lib/python3.10/random.py)
return self.randrange(a, b+1)
Line 370 in randint (/usr/lib/python3.10/random.py)
y = random.randint(self.minX, self.maxY)
Line 21 in pick (Solution.py)
result = obj.pick();
Line 47 in __helper_select_method__ (Solution.py)
ret.append(__DriverSolution__().__helper_select_method__(method, params[index], obj))
Line 84 in _driver (Solution.py)
_driver()
Line 93 in <module> (Solution.py)
最后执行的输入:
["Solution","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick","pick"]
[[[[35330199,-46858448,35330694,-46856950]]],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
"""
|
[
"noreply@github.com"
] |
BIAOXYZ.noreply@github.com
|
4bfca7a55f0ea04def1379bcb59903bd3a1b8c27
|
98b63e3dc79c75048163512c3d1b71d4b6987493
|
/tensorflow/python/util/function_parameter_canonicalizer_test.py
|
968265ff36f96f16dd717d8b83548f52c205884a
|
[
"Apache-2.0"
] |
permissive
|
galeone/tensorflow
|
11a4e4a3f42f4f61a65b432c429ace00401c9cc4
|
1b6f13331f4d8e7fccc66bfeb0b066e77a2b7206
|
refs/heads/master
| 2022-11-13T11:56:56.143276
| 2020-11-10T14:35:01
| 2020-11-10T14:35:01
| 310,642,488
| 21
| 12
|
Apache-2.0
| 2020-11-06T16:01:03
| 2020-11-06T16:01:02
| null |
UTF-8
|
Python
| false
| false
| 3,409
|
py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tensorflow::FunctionParameterCanonicalizer`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import _function_parameter_canonicalizer_binding_for_test
from tensorflow.python.platform import test
class FunctionParameterCanonicalizerTest(test.TestCase):
  """Exercises FunctionParameterCanonicalizer against a matmul-like signature.

  The canonicalizer maps (args, kwargs) onto the full positional parameter
  list, filling unspecified parameters from the defaults tuple.
  """

  def setUp(self):
    super(FunctionParameterCanonicalizerTest, self).setUp()
    # Signature mirrors tf.linalg.matmul: 2 required positionals ('a', 'b')
    # followed by 7 defaulted parameters.
    self._matmul_func = (
        _function_parameter_canonicalizer_binding_for_test
        .FunctionParameterCanonicalizer([
            'a', 'b', 'transpose_a', 'transpose_b', 'adjoint_a', 'adjoint_b',
            'a_is_sparse', 'b_is_sparse', 'name'
        ], (False, False, False, False, False, False, None)))

  def testPosOnly(self):
    # Only the required positionals; everything else comes from defaults.
    self.assertEqual(
        self._matmul_func.canonicalize(2, 3),
        [2, 3, False, False, False, False, False, False, None])

  def testPosOnly2(self):
    self.assertEqual(
        self._matmul_func.canonicalize(2, 3, True, False, True),
        [2, 3, True, False, True, False, False, False, None])

  def testPosAndKwd(self):
    self.assertEqual(
        self._matmul_func.canonicalize(
            2, 3, transpose_a=True, name='my_matmul'),
        [2, 3, True, False, False, False, False, False, 'my_matmul'])

  def testPosAndKwd2(self):
    # A required positional may also be passed as a keyword.
    self.assertEqual(
        self._matmul_func.canonicalize(2, b=3),
        [2, 3, False, False, False, False, False, False, None])

  def testMissingPos(self):
    with self.assertRaisesRegex(TypeError,
                                'Missing required positional argument'):
      self._matmul_func.canonicalize(2)

  def testMissingPos2(self):
    with self.assertRaisesRegex(TypeError,
                                'Missing required positional argument'):
      self._matmul_func.canonicalize(
          transpose_a=True, transpose_b=True, adjoint_a=True)

  def testTooManyArgs(self):
    with self.assertRaisesRegex(TypeError, 'Too many arguments were given'):
      self._matmul_func.canonicalize(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

  def testInvalidKwd(self):
    with self.assertRaisesRegex(TypeError,
                                'Got an unexpected keyword argument'):
      self._matmul_func.canonicalize(2, 3, hohoho=True)

  def testDuplicatedArg(self):
    # Positional value for 'b' plus keyword 'b' must be rejected.
    with self.assertRaisesRegex(TypeError,
                                "Got multiple values for argument 'b'"):
      self._matmul_func.canonicalize(2, 3, False, b=4)

  def testDuplicatedArg2(self):
    with self.assertRaisesRegex(
        TypeError, "Got multiple values for argument 'transpose_a'"):
      self._matmul_func.canonicalize(2, 3, False, transpose_a=True)


if __name__ == '__main__':
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
502b40f748bc05baab4530a274f45cef0bac52c3
|
a54007706a09b387690f79fd7ffd889decad42f1
|
/day03/code/05_集合的增加_删除.py
|
41a392c02626da539756cf162017fbbb266f77e7
|
[] |
no_license
|
lvah/201903python
|
d425534544a1f91e5b80b5ff0de5ca34037fe6e9
|
1415fcb7697dfa2884d94dcd8963477e12fe0624
|
refs/heads/master
| 2020-07-06T16:45:37.882819
| 2019-09-08T10:13:07
| 2019-09-08T10:13:07
| 203,082,401
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
# Demo of set mutation — adding and removing elements.
# (Original comments translated from Chinese.)
# The duplicate 'user1' collapses: sets keep unique items only.
allow_users = {'user1', 'user2', 'user3', 'user1'}
# ******************************* add ***************************
# # add a single element to the set;
# allow_users.add('user4')
# print(allow_users)
# # update: add several elements to the set at once;
# allow_users.update({'user4', 'user5', 'user6'})
# print(allow_users)
# **************************** delete ********************************
# # remove: delete the given element; raises an error if it is missing
# allow_users.remove('user1')
# print(allow_users)
#
# # discard: delete the given element; does nothing if it is missing
# allow_users.discard('user1')
# print(allow_users)
# # pop removes an arbitrary element from the set
# delete_user = allow_users.pop()
# print(allow_users)
# print("随机删除的元素:", delete_user)
# # clear: empty the set
# allow_users.clear()
# print(allow_users)
# To sort a set, convert it to a list first;
nums = {2, 3, 1, 2, 3, 5, 7, 8, 3, 22, 2}
nums = list(nums)
# sorts ascending by default; reverse=True sorts descending;
nums.sort(reverse=True)
print(nums)
|
[
"root@foundation0.ilt.example.com"
] |
root@foundation0.ilt.example.com
|
ae62363973b6dd6edae22ae617c2f996f7344c2c
|
2ad1116411d79d5bac26402ccac4f5785a0485e4
|
/text_in_frame.py
|
4a43b09493235cba7f3292369bae44b4f6d2e17f
|
[] |
no_license
|
slavkoBV/solved-tasks-SoftGroup-course
|
0a879fcaeedd2b1d27b2970ea621eb2bdfab4ce4
|
12461d50a095764d5e237babaec466bc2d8dc672
|
refs/heads/master
| 2021-01-18T15:55:04.224872
| 2017-05-08T15:38:16
| 2017-05-08T15:38:16
| 86,691,843
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
def text_in_frame(s):
    """Print every word of *s* (with the suffix ' in the frame' appended)
    centred inside an asterisk frame sized to the longest word."""
    words = (s + ' in the frame').split()
    width = max(len(w) for w in words)
    border = '{0:*^{1}}'.format('*', width * 3)
    print(border)
    for word in words:
        # One '*' column on each side, the word centred between them.
        print('{0:<{2}}{1:^{2}}{0:>{2}}'.format('*', word, width))
    print(border)
message = 'Доброго дня, Україно!'
text_in_frame(message)
|
[
"slav_b@ukr.net"
] |
slav_b@ukr.net
|
361977c494c2ec3cc91e7dac120631337238d4fd
|
908cf8e6ef52033bbf3d5afbb29637a25f5d66f8
|
/test/test_codat_public_api_models_metadata_account_ref_model.py
|
5579c7a718dad7398f39ad75b1097f758be4922b
|
[] |
no_license
|
procurify/codat-python-sdk
|
074769a2d9e72640741689b6f51e880d35b88095
|
3c8f664998427bda32bad8062c3bf324f39506da
|
refs/heads/master
| 2023-08-25T03:55:19.817085
| 2021-10-22T22:14:34
| 2021-10-22T22:14:34
| 395,381,471
| 1
| 0
| null | 2021-10-20T21:10:31
| 2021-08-12T16:31:03
|
Python
|
UTF-8
|
Python
| false
| false
| 960
|
py
|
"""
Codat API
[What's changed in our Swagger](https://docs.codat.io/docs/new-swagger-ui) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import codat_python_sdk
from codat_python_sdk.model.codat_public_api_models_metadata_account_ref_model import CodatPublicApiModelsMetadataAccountRefModel
class TestCodatPublicApiModelsMetadataAccountRefModel(unittest.TestCase):
    """CodatPublicApiModelsMetadataAccountRefModel unit test stubs

    Auto-generated by openapi-generator; the test body is a placeholder
    until example attribute values are filled in (see FIXME below).
    """
    def setUp(self):
        # No fixtures needed for the generated stub.
        pass
    def tearDown(self):
        pass
    def testCodatPublicApiModelsMetadataAccountRefModel(self):
        """Test CodatPublicApiModelsMetadataAccountRefModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = CodatPublicApiModelsMetadataAccountRefModel()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
|
[
"contact@alexchalk.net"
] |
contact@alexchalk.net
|
b88557710a9b5ca348cca0003e80aee34b2faa10
|
0ff7c11d988d29f86fbeb0260a6f98405a54f711
|
/rh/apps/content/migrations/0022_iconcard_slug.py
|
9224c2528c7996b66c79decf710417975255ec40
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/chpro-microsite
|
774c5055ed5e72ec5030da14bc4ff53afbc0df63
|
4e1d1210b49ec60ab0711d78235bf45eeb5c0275
|
refs/heads/master
| 2022-12-14T18:50:44.900595
| 2018-07-11T09:26:22
| 2018-07-11T09:26:22
| 119,061,934
| 0
| 0
|
BSD-3-Clause
| 2022-12-07T10:23:14
| 2018-01-26T14:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-08 22:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a nullable ``slug`` field to the ``IconCard`` model."""
    dependencies = [
        ('content', '0021_cardgrid_style'),
    ]
    operations = [
        migrations.AddField(
            model_name='iconcard',
            name='slug',
            # null=True lets existing rows migrate without a default value.
            field=models.SlugField(null=True),
        ),
    ]
|
[
"smileychris@gmail.com"
] |
smileychris@gmail.com
|
63795a7d123cd3d7678824c23b97d68a29ba9568
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902TG/133-tideGauge.py
|
f7874d9721799ca1d9f702f12064f991397cb8fd
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,075
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 01 10:00:00 2020
MERRAv2 netCDF extraction script - template
To create an extraction script for each tide gauge
@author: Michael Tadesse
"""
import os
import pandas as pd
from d_merra_define_grid import Coordinate, findPixels, findindx
from c_merra_read_netcdf import readnetcdf
from f_merra_subset import subsetter
def extract_data(delta= 3):
    """
    This is the master function that calls subsequent functions
    to extract uwnd, vwnd, slp for the specified
    tide gauges
    delta: distance (in degrees) from the tide gauge
    """
    print('Delta = {}'.format(delta), '\n')
    #defining the folders for predictors
    # NOTE: cluster-specific absolute paths (Lustre filesystem).
    dir_in = "/lustre/fs0/home/mtadesse/MERRAv2/data"
    surge_path = "/lustre/fs0/home/mtadesse/obs_surge"
    csv_path = "/lustre/fs0/home/mtadesse/merraLocalized"
    #cd to the obs_surge dir to get TG information
    os.chdir(surge_path)
    tg_list = os.listdir()
    #cd to the obs_surge dir to get TG information
    os.chdir(dir_in)
    years = os.listdir()
    #################################
    #looping through the year folders
    #################################
    #to mark the first csv
    firstCsv = True;
    for yr in years:
        os.chdir(dir_in)
        #print(yr, '\n')
        os.chdir(os.path.join(dir_in, yr))
        ####################################
        #looping through the daily .nc files
        ####################################
        for dd in os.listdir():
            os.chdir(os.path.join(dir_in, yr)) #back to the predictor folder
            print(dd, '\n')
            #########################################
            #get netcdf components - predictor file
            #########################################
            nc_file = readnetcdf(dd)
            lon, lat, time, predSLP, predU10, predV10 = \
            nc_file[0], nc_file[1], nc_file[2], nc_file[3], nc_file[4]\
                , nc_file[5]
            # Only tide gauge index 133 is processed by this script
            # (each generated script handles one gauge; see filename).
            x = 133
            y = 134
            #looping through individual tide gauges
            for t in range(x, y):
                #the name of the tide gauge - for saving purposes
                # tg = tg_list[t].split('.mat.mat.csv')[0]
                tg = tg_list[t]
                #extract lon and lat data from surge csv file
                #print(tg, '\n')
                os.chdir(surge_path)
                # Skip gauges whose surge file is empty (0 bytes).
                if os.stat(tg).st_size == 0:
                    print('\n', "This tide gauge has no surge data!", '\n')
                    continue
                surge = pd.read_csv(tg, header = None)
                #surge_with_date = add_date(surge)
                #define tide gauge coordinate(lon, lat)
                tg_cord = Coordinate(surge.iloc[0,0], surge.iloc[0,1])
                #find closest grid points and their indices
                close_grids = findPixels(tg_cord, delta, lon, lat)
                ind_grids = findindx(close_grids, lon, lat)
                #loop through preds#
                #subset predictor on selected grid size
                predictors = {'slp':predSLP, 'wnd_u':predU10, \
                              'wnd_v':predV10}
                # Accumulate each predictor across days; firstCsv marks
                # the very first subset so the accumulators get seeded.
                for xx in predictors.keys():
                    pred_new = subsetter(dd, predictors[xx], ind_grids, time)
                    if xx == 'slp':
                        if firstCsv:
                            finalSLP = pred_new
                        else:
                            finalSLP = pd.concat([finalSLP, pred_new], axis = 0)
                            print(finalSLP.shape)
                    elif xx == 'wnd_u':
                        if firstCsv:
                            finalUwnd = pred_new
                        else:
                            finalUwnd = pd.concat([finalUwnd, pred_new], axis = 0)
                    elif xx == 'wnd_v':
                        if firstCsv:
                            finalVwnd = pred_new
                            # All three accumulators are seeded now.
                            firstCsv = False;
                        else:
                            finalVwnd = pd.concat([finalVwnd, pred_new], axis = 0)
    #create directories to save pred_new
    os.chdir(csv_path)
    #tide gauge directory
    tg_name_old = tg.split('.mat.mat.csv')[0]
    tg_name = '-'.join([str(t), tg_name_old])
    try:
        os.makedirs(tg_name)
        os.chdir(tg_name) #cd to it after creating it
    except FileExistsError:
        #directory already exists
        os.chdir(tg_name)
    #save as csv
    finalSLP.to_csv('slp.csv')
    finalUwnd.to_csv('wnd_u.csv')
    finalVwnd.to_csv('wnd_v.csv')
#run script
extract_data(delta= 3)
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
f2a189d329b00fd8e8a3cfd89a8d8df1fcd2d9f7
|
50afc0db7ccfc6c80e1d3877fc61fb67a2ba6eb7
|
/challenge9(dominos&chess)/Stilton.py
|
caaccb1afd8aea45c38aa9d03efe151389bd0a8e
|
[
"MIT"
] |
permissive
|
banana-galaxy/challenges
|
792caa05e7b8aa10aad8e04369fc06aaf05ff398
|
8655c14828607535a677e2bb18689681ee6312fa
|
refs/heads/master
| 2022-12-26T23:58:12.660152
| 2020-10-06T13:38:04
| 2020-10-06T13:38:04
| 268,851,516
| 11
| 8
|
MIT
| 2020-09-22T21:21:30
| 2020-06-02T16:24:41
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
def fill(matrix):
    """Decide whether the free squares of a chessboard-coloured matrix
    can be fully covered by dominoes, by colour counting.

    A cell value of 1 marks a missing (unusable) square. Every domino
    covers exactly one "black" and one "white" cell, so a perfect cover
    requires equal colour counts over the free squares.
    """
    total = len(matrix) * len(matrix[0])
    # 1-based coordinates of every missing square.
    missing = [(r, c)
               for r, row in enumerate(matrix, 1)
               for c, cell in enumerate(row, 1)
               if cell == 1]
    # An odd number of free squares can never be covered.
    if (total - len(missing)) % 2 != 0:
        return False
    # Colour counts over the whole board (black gets the extra cell
    # when the board has an odd number of squares).
    white = total / 2
    black = white + 1 if total % 2 != 0 else white
    # Remove each missing square from its colour's count: squares whose
    # row/column parities match count as black, the others as white.
    for r, c in missing:
        if (r + c) % 2 == 0:
            black -= 1
        else:
            white -= 1
    return black == white
|
[
"cawasp@gmail.com"
] |
cawasp@gmail.com
|
fbda94eb5d433f1be95961f97b4ab52024ffdd70
|
f14f48e50efb50cfe7078c68f0d61015ae2d646b
|
/Stock/Select/Ui/Basic/Dlg/DyStockSelectStockInfoDlg.py
|
3b2bb0fc7d00107148e986883510c06c76cf6928
|
[
"MIT"
] |
permissive
|
stockcode/DevilYuan
|
17a23da68954714cacae29f428c3005444e0e3a2
|
163d06cb7fd30a8f24b3f2e06206c1fd024353c3
|
refs/heads/master
| 2020-05-03T14:40:08.420822
| 2019-03-29T13:16:42
| 2019-03-29T13:16:42
| 178,683,886
| 2
| 1
|
MIT
| 2019-03-31T12:17:49
| 2019-03-31T12:17:49
| null |
UTF-8
|
Python
| false
| false
| 1,718
|
py
|
from PyQt5.QtWidgets import QDialog, QGridLayout, QPushButton, QApplication, QMessageBox
from DyCommon.Ui.DyTreeWidget import *
class DyStockSelectStockInfoDlg(QDialog):
    """ Dialog for selecting which stock-profile (F10) indicators to show.
    """
    # Tree of selectable indicator categories shown in the dialog
    # (labels are user-facing Chinese strings).
    fields = \
    [
        ['公司资料',
            ['所属行业'],
            ['主营业务'],
            ['涉及概念']
        ],
        ['股本',
            ['实际流通股(亿)'],
            ['实际流通市值(亿元)'],
            ['机构占比流通(%)'],
        ]
    ]
    def __init__(self, data, parent=None):
        # 'data' is an out-parameter dict: the chosen indicators are
        # written into data['indicators'] when the user clicks OK.
        super().__init__(parent)
        self._data = data
        self._initUi()
    def _initUi(self):
        self.setWindowTitle('个股资料(F10)')
        # Widgets
        cancelPushButton = QPushButton('Cancel')
        okPushButton = QPushButton('OK')
        cancelPushButton.clicked.connect(self._cancel)
        okPushButton.clicked.connect(self._ok)
        self._stockInfoWidget = DyTreeWidget(self.fields)
        # Layout
        grid = QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(self._stockInfoWidget, 0, 0, 20, 10)
        grid.addWidget(okPushButton, 0, 10)
        grid.addWidget(cancelPushButton, 1, 10)
        self.setLayout(grid)
        # Size the dialog relative to the desktop: 1/3 width, 1/2 height.
        self.resize(QApplication.desktop().size().width()//3, QApplication.desktop().size().height()//2)
    def _ok(self):
        # Reject the dialog input if no indicator is checked.
        indicators = self._stockInfoWidget.getCheckedTexts()
        if not indicators:
            QMessageBox.warning(self, '错误', '没有选择指标!')
            return
        self._data['indicators'] = indicators
        self.accept()
    def _cancel(self):
        self.reject()
|
[
"louis_chu@163.com"
] |
louis_chu@163.com
|
df6d130f1b67a434d02ea576a6746f5c86493d01
|
795b68819d51af14dfabb8dbe40c9e8153029188
|
/Algorithms/hamming_distance.py
|
3ec6797d6e087fc60cbe199eb4b3107677bfc583
|
[] |
no_license
|
MotazBellah/Code-Challenge
|
507f1fd3d5b3265e54905979c80d609afd81c54d
|
c38c95239193e26c1a88f6736d2ab9ee37185964
|
refs/heads/master
| 2022-02-25T02:54:10.216892
| 2022-02-19T19:28:05
| 2022-02-19T19:28:05
| 193,115,018
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
def hamming_distance(str1, str2):
"""
Calculate the hamming distance of the two strings
Args:
str1(string),str2(string): Strings to be used for finding the hamming distance
Returns:
int: Hamming Distance
"""
distance = 0
if len(str1) == len(str2):
for i in range(len(str1)):
if str1[i] != str2[i]:
distance += 1
return distance
else:
return None
|
[
"engineer.android@yahoo.com"
] |
engineer.android@yahoo.com
|
295ac0f7cb802d1129f5181fc39e2f5d742df573
|
981fcfe446a0289752790fd0c5be24020cbaee07
|
/python2_Grammer/src/basic/string_/bianma/in.py
|
42c74f80bb0d6b0c77b3d31fad6f876c862d6032
|
[] |
no_license
|
lijianbo0130/My_Python
|
7ba45a631049f6defec3977e680cd9bd75d138d1
|
8bd7548c97d2e6d2982070e949f1433232db9e07
|
refs/heads/master
| 2020-12-24T18:42:19.103529
| 2016-05-30T03:03:34
| 2016-05-30T03:03:34
| 58,097,799
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
#coding=utf-8
'''
可以用in这个操作
可以不加u 或者同时加u
'''
if u"用" in u"用的" : #可以
print True
# if u"用" in "用的" :#会报 exception
# print True
if "aa" in u"aaaa": #可以
print True
if u"aa" in "aaaa": #可以
print True
|
[
"lijianbo0130@qq.com"
] |
lijianbo0130@qq.com
|
0989cff52b5769919d5c96249867fca82355446c
|
40bb4ced96423bc164ec3fbc5b253b92dd300069
|
/json1.py
|
f95890c17f307e8030efe8e6ceab2e81d87e735e
|
[] |
no_license
|
bawejakunal/coursera-python
|
b290ec238421b9c540b882bf89fc7c36934db798
|
c98a3dffb33e416e97e020a8d36a0488659f34b6
|
refs/heads/master
| 2021-01-01T05:11:46.467456
| 2016-10-29T23:57:22
| 2016-10-29T23:57:22
| 58,811,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
import urllib
import json
url = raw_input('Enter json file location: ')
handle = urllib.urlopen(url)
json_data = json.load(handle)
total = 0
for comment in json_data['comments']:
total += comment['count']
print total
|
[
"bawejakunal15@gmail.com"
] |
bawejakunal15@gmail.com
|
5be3e1a9ba33f04859f32d5991cb45ded9c57d51
|
4709728fe87bb36c69ec3b4c448d9a07f6736780
|
/Python Scripts/Problem 17(int).py
|
7b760c015d696067ee732bdcb97368a84f8b7ebc
|
[] |
no_license
|
javsav/Project-Euler-Solutions
|
89237536d20b542d90874127495772271485c503
|
9866878027bc0fe2bde970c9869f597977b69b2a
|
refs/heads/master
| 2021-07-15T08:00:43.374296
| 2021-03-13T09:34:40
| 2021-03-13T09:34:40
| 25,625,647
| 1
| 1
| null | 2014-10-25T06:57:29
| 2014-10-23T07:49:13
|
Python
|
UTF-8
|
Python
| false
| false
| 869
|
py
|
all=[]
one2nine = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
ten2nineteen = ["ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"]
twenty2ninety = ["twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
hundreds = ["hundred"]
thousand = ["onethousand"]
for i in one2nine:
all.append(len(i))
for i in ten2nineteen:
all.append(len(i))
for i in twenty2ninety:
all.append(len(i))
for j in one2nine:
all.append(len(i)+len(j))
one2ninetynine = []
for i in all:
one2ninetynine.append(i)
for i in one2nine:
hundredz = i + "hundred"
all.append(len(hundredz))
for k in one2ninetynine:
num = len(hundredz + "and") + k
all.append(num)
all.append(len("onethousand"))
print sum(all)
|
[
"whatever"
] |
whatever
|
d298f539a68d4dcaf9cd1289333c6744dbf1de40
|
aad164e4efe1d55cc189c35956bfd435b14a0f52
|
/eve-8.21.494548/eve/client/script/ui/services/shipConfigSvc.py
|
6932a49e75d9e03b8b55c15f9bef0c9e9a5c80ca
|
[] |
no_license
|
Pluckyduck/eve
|
61cc41fe8fd4dca4fbdcc4761a37bcfeb27ed84f
|
9a277707ab1f162c6bd9618faf722c0be3ea93ad
|
refs/heads/master
| 2020-12-28T23:35:29.992875
| 2013-05-06T14:24:33
| 2013-05-06T14:24:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
#Embedded file name: c:/depot/games/branches/release/EVE-TRANQUILITY/eve/client/script/ui/services/shipConfigSvc.py
import util
import locks
import service
import moniker
class ShipConfigSvc(service.Service):
__guid__ = 'svc.shipConfig'
__update_on_reload__ = 1
__dependencies__ = []
__notifyevents__ = ['OnSessionChanged']
__startupdependencies__ = []
def Run(self, memstream_which_absolutely_noone_uses_anymore_but_no_one_gets_around_to_remove = None):
self._ship = None
self.shipid = util.GetActiveShip()
self.config = None
def _ClearCachedAttributes(self):
self.shipid = util.GetActiveShip()
self.config = None
self._ship = None
def OnSessionChanged(self, isRemote, session, change):
if 'locationid' in change or 'shipid' in change:
self._ClearCachedAttributes()
@property
def ship(self):
if self._ship is None:
self._ship = moniker.GetShipAccess()
return self._ship
def GetShipConfig(self, shipID = None):
if shipID is not None:
return moniker.GetShipAccess().GetShipConfiguration(shipID)
if util.GetActiveShip() != self.shipid:
self._ClearCachedAttributes()
with locks.TempLock('%s:%s' % (self, self.shipid)):
if self.config is None:
self.config = self.ship.GetShipConfiguration(self.shipid)
return self.config
def SetShipConfig(self, key, value):
lock = locks.TempLock('%s:%s' % (self, self.shipid))
if lock.lockedWhen is not None:
return
with lock:
self.ship.ConfigureShip(self.shipid, {key: value})
self.config[key] = value
def ToggleFleetHangarFleetAccess(self):
self.SetShipConfig('FleetHangar_AllowFleetAccess', not self.IsFleetHangarFleetAccessAllowed())
def ToggleFleetHangarCorpAccess(self):
self.SetShipConfig('FleetHangar_AllowCorpAccess', not self.IsFleetHangarCorpAccessAllowed())
def ToggleShipMaintenanceBayFleetAccess(self):
self.SetShipConfig('SMB_AllowFleetAccess', not self.IsShipMaintenanceBayFleetAccessAllowed())
def ToggleShipMaintenanceBayCorpAccess(self):
self.SetShipConfig('SMB_AllowCorpAccess', not self.IsShipMaintenanceBayCorpAccessAllowed())
def IsFleetHangarFleetAccessAllowed(self):
return self.GetShipConfig()['FleetHangar_AllowFleetAccess']
def IsFleetHangarCorpAccessAllowed(self):
return self.GetShipConfig()['FleetHangar_AllowCorpAccess']
def IsShipMaintenanceBayFleetAccessAllowed(self):
return self.GetShipConfig()['SMB_AllowFleetAccess']
def IsShipMaintenanceBayCorpAccessAllowed(self):
return self.GetShipConfig()['SMB_AllowCorpAccess']
|
[
"ferox2552@gmail.com"
] |
ferox2552@gmail.com
|
6a580dc82809ae40b6262af7932e4ec1dc490998
|
c51ed0c36d532276211497c8d7e5dda68eb6c303
|
/host_management/models.py
|
4467361164dbfcaa9c56e0f535aafa0a64b79e68
|
[] |
no_license
|
pwgraham91/restauPro
|
75f56f1c1b4aaa8c5d1465765802fa71955f96ae
|
bc6f278e0aa1603e2da4c8fd560fad7db7ab148c
|
refs/heads/master
| 2022-12-07T20:28:43.579613
| 2015-04-21T05:06:11
| 2015-04-21T05:06:11
| 25,218,369
| 0
| 0
| null | 2022-11-22T00:31:59
| 2014-10-14T17:35:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,664
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class Restaurant(AbstractUser):
restaurant_name = models.CharField(max_length=50)
def __unicode__(self):
return u"{}".format(self.username)
class Table(models.Model):
table_name = models.CharField(max_length=10, help_text="example: 103 (do not put 'table 103' just '103'")
seats = models.SmallIntegerField(help_text="number of maximum available seats at the table")
restaurant = models.ForeignKey(Restaurant, related_name='tables')
def __unicode__(self):
return u"{}".format(self.table_name)
class Party(models.Model):
party_name = models.CharField(max_length=50, help_text="Enter this if you know the name of the party so you can save their data", blank=True)
number_of_males = models.CharField(max_length=2)
number_of_females = models.CharField(max_length=2)
number_of_children = models.CharField(max_length=2)
lunch = models.BooleanField(help_text="check true for lunch or false for dinner", default=False)
monday_to_thursday = models.BooleanField(help_text="check true for Monday to Thursday or false for Friday to Sunday", default=False)
start_time = models.DateTimeField(auto_now_add=True, null=True)
reservation_time = models.DateTimeField(null=True)
predicted_end_time = models.DateTimeField(null=True)
end_time = models.DateTimeField(null=True)
total_time = models.CharField(max_length=2, blank=True)
table = models.ForeignKey(Table, related_name='parties')
def __unicode__(self):
return u"pk:{} name:{}".format(self.pk, self.party_name)
|
[
"pwgraham91@gmail.com"
] |
pwgraham91@gmail.com
|
5289fb1c8fe99a7fc13a1f1971b0823ce6869053
|
271c7959a39f3d7ff63dddf285004fd5badee4d9
|
/venv/Lib/site-packages/flask_codemirror/fields.py
|
7f2fda9478de0dc6e5891594c3f5cc0c5036309b
|
[
"MIT"
] |
permissive
|
natemellendorf/configpy
|
b6b01ea4db1f2b9109fd4ddb860e9977316ed964
|
750da5eaef33cede9f3ef532453d63e507f34a2c
|
refs/heads/master
| 2022-12-11T05:22:54.289720
| 2019-07-22T05:26:09
| 2019-07-22T05:26:09
| 176,197,442
| 4
| 1
|
MIT
| 2022-12-08T02:48:51
| 2019-03-18T03:24:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Flask Codemirror Field
~~~~~~~~~~~~~~~~~~~~~~
Import it using
`from flask.ext.codemirror.fields import CodeMirrorField`
It works exactly like a `wtforms.fields.TextAreaField`
"""
from __future__ import print_function
from flask_codemirror.widgets import CodeMirrorWidget
try:
from wtforms.fields import TextAreaField
except ImportError as exc:
print('WTForms is required by Flask-Codemirror')
raise exc
__author__ = 'TROUVERIE Joachim'
class CodeMirrorField(TextAreaField):
"""Code Mirror Field
A TextAreaField with a custom widget
:param language: CodeMirror mode
:param config: CodeMirror config
"""
def __init__(self, label='', validators=None, language=None,
config=None, **kwargs):
widget = CodeMirrorWidget(language, config)
super(CodeMirrorField, self).__init__(label=label,
validators=validators,
widget=widget,
**kwargs)
|
[
"nate.mellendorf@gmail.com"
] |
nate.mellendorf@gmail.com
|
0ce8042dabf8b7509cbfa273ea284407ef3b82da
|
f93d4582838cdb4fecfcce3ba251c0a616e2baeb
|
/backend/location/migrations/0001_initial.py
|
53c62a7cdcb732e888811b7fdf2030aa8e9c4ca5
|
[] |
no_license
|
crowdbotics-apps/lil-lolo-20580
|
e203827883f9ad9d94bb704b037986fbe290579e
|
500eeabfb4ac0ca324f6616320ea35627fecf64d
|
refs/heads/master
| 2022-12-18T16:51:07.642630
| 2020-09-22T22:18:02
| 2020-09-22T22:18:02
| 297,785,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,700
|
py
|
# Generated by Django 2.2.16 on 2020-09-22 22:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('task_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MapLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
],
),
migrations.CreateModel(
name='TaskLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.TextField()),
('zip', models.CharField(max_length=6)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tasklocation_location', to='location.MapLocation')),
],
),
migrations.CreateModel(
name='TaskerLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('latitude', models.DecimalField(decimal_places=8, max_digits=12)),
('longitude', models.DecimalField(decimal_places=8, max_digits=12)),
('last_updated', models.DateTimeField(auto_now=True)),
('address', models.TextField(blank=True, null=True)),
('zip', models.CharField(blank=True, max_length=6, null=True)),
('tasker', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='taskerlocation_tasker', to='task_profile.TaskerProfile')),
],
),
migrations.CreateModel(
name='CustomerLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('zip', models.CharField(max_length=6)),
('country', models.CharField(max_length=50)),
('customer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_customer', to='task_profile.CustomerProfile')),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='customerlocation_location', to='location.MapLocation')),
],
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
d236e1b44c5dcd360a0166680228eae6a2e503ac
|
182c651a9b00b9b4d80e6d51ae574cb793958cd6
|
/widgets/digitalclock.py
|
dabda1832b534b93346b0b7840f01d7283a6e6e9
|
[] |
no_license
|
eudu/pyqt-examples
|
c61a7108e1fbfcf2cd918a0f99e9a5a90a3f305c
|
8e533b7b3c5e9bbe0617ef1ecb9b169dd216c181
|
refs/heads/master
| 2020-03-16T01:23:19.573347
| 2018-05-06T20:20:57
| 2018-05-06T20:20:57
| 132,438,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
#!/usr/bin/python3
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QTime, QTimer
from PyQt5.QtWidgets import QApplication, QLCDNumber
class DigitalClock(QLCDNumber):
def __init__(self, parent=None):
super(DigitalClock, self).__init__(parent)
self.setSegmentStyle(QLCDNumber.Filled)
timer = QTimer(self)
timer.timeout.connect(self.showTime)
timer.start(1000)
self.showTime()
self.setWindowTitle("Digital Clock")
self.resize(150, 60)
def showTime(self):
time = QTime.currentTime()
text = time.toString('hh:mm')
if (time.second() % 2) == 0:
text = text[:2] + ' ' + text[3:]
self.display(text)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
clock = DigitalClock()
clock.show()
sys.exit(app.exec_())
|
[
"dukalow@gmail.com"
] |
dukalow@gmail.com
|
95dbe60a16f38105b41184d2329854c41b7b66e1
|
598abc2c440808b7999ac45a6bf420e1ec48b105
|
/utils/util.py
|
968583c878a0b99bc8f16be1368a667656d05158
|
[] |
no_license
|
gmdmgithub/python-chat
|
0ad70868e166f2289e475e57834a79e7ebf5e68c
|
c4fd8dbfc4bd93704199cc0b5596aaee405b447e
|
refs/heads/master
| 2020-04-18T09:39:20.880641
| 2019-03-15T14:31:20
| 2019-03-15T14:31:20
| 167,441,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
import os
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def getEnvVal(name):
return os.environ.get(name)
|
[
"gmika@interia.pl"
] |
gmika@interia.pl
|
6d1b6ee99f7db8e1acb5e14728dad369ccd33b37
|
d61d05748a59a1a73bbf3c39dd2c1a52d649d6e3
|
/chromium/build/toolchain/win/rc/rc.py
|
23387621c02332eb2ff461291706a93e5cf656a0
|
[
"BSD-3-Clause"
] |
permissive
|
Csineneo/Vivaldi
|
4eaad20fc0ff306ca60b400cd5fad930a9082087
|
d92465f71fb8e4345e27bd889532339204b26f1e
|
refs/heads/master
| 2022-11-23T17:11:50.714160
| 2019-05-25T11:45:11
| 2019-05-25T11:45:11
| 144,489,531
| 5
| 4
|
BSD-3-Clause
| 2022-11-04T05:55:33
| 2018-08-12T18:04:37
| null |
UTF-8
|
Python
| false
| false
| 7,582
|
py
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""usage: rc.py [options] input.res
A resource compiler for .rc files.
options:
-h, --help Print this message.
-I<dir> Add include path.
-D<sym> Define a macro for the preprocessor.
/fo<out> Set path of output .res file.
/nologo Ignored (rc.py doesn't print a logo by default).
/showIncludes Print referenced header and resource files."""
from __future__ import print_function
from collections import namedtuple
import codecs
import os
import re
import subprocess
import sys
import tempfile
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = \
os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR))))
def ParseFlags():
"""Parses flags off sys.argv and returns the parsed flags."""
# Can't use optparse / argparse because of /fo flag :-/
includes = []
defines = []
output = None
input = None
show_includes = False
# Parse.
for flag in sys.argv[1:]:
if flag == '-h' or flag == '--help':
print(__doc__)
sys.exit(0)
if flag.startswith('-I'):
includes.append(flag)
elif flag.startswith('-D'):
defines.append(flag)
elif flag.startswith('/fo'):
if output:
print('rc.py: error: multiple /fo flags', '/fo' + output, flag,
file=sys.stderr)
sys.exit(1)
output = flag[3:]
elif flag == '/nologo':
pass
elif flag == '/showIncludes':
show_includes = True
elif (flag.startswith('-') or
(flag.startswith('/') and not os.path.exists(flag))):
print('rc.py: error: unknown flag', flag, file=sys.stderr)
print(__doc__, file=sys.stderr)
sys.exit(1)
else:
if input:
print('rc.py: error: multiple inputs:', input, flag, file=sys.stderr)
sys.exit(1)
input = flag
# Validate and set default values.
if not input:
print('rc.py: error: no input file', file=sys.stderr)
sys.exit(1)
if not output:
output = os.path.splitext(input)[0] + '.res'
Flags = namedtuple('Flags', ['includes', 'defines', 'output', 'input',
'show_includes'])
return Flags(includes=includes, defines=defines, output=output, input=input,
show_includes=show_includes)
def ReadInput(input):
""""Reads input and returns it. For UTF-16LEBOM input, converts to UTF-8."""
# Microsoft's rc.exe only supports unicode in the form of UTF-16LE with a BOM.
# Our rc binary sniffs for UTF-16LE. If that's not found, if /utf-8 is
# passed, the input is treated as UTF-8. If /utf-8 is not passed and the
# input is not UTF-16LE, then our rc errors out on characters outside of
# 7-bit ASCII. Since the driver always converts UTF-16LE to UTF-8 here (for
# the preprocessor, which doesn't support UTF-16LE), our rc will either see
# UTF-8 with the /utf-8 flag (for UTF-16LE input), or ASCII input.
# This is compatible with Microsoft rc.exe. If we wanted, we could expose
# a /utf-8 flag for the driver for UTF-8 .rc inputs too.
# TODO(thakis): Microsoft's rc.exe supports BOM-less UTF-16LE. We currently
# don't, but for chrome it currently doesn't matter.
is_utf8 = False
try:
with open(input, 'rb') as rc_file:
rc_file_data = rc_file.read()
if rc_file_data.startswith(codecs.BOM_UTF16_LE):
rc_file_data = rc_file_data[2:].decode('utf-16le').encode('utf-8')
is_utf8 = True
except IOError:
print('rc.py: failed to open', input, file=sys.stderr)
sys.exit(1)
except UnicodeDecodeError:
print('rc.py: failed to decode UTF-16 despite BOM', input, file=sys.stderr)
sys.exit(1)
return rc_file_data, is_utf8
def Preprocess(rc_file_data, flags):
"""Runs the input file through the preprocessor."""
clang = os.path.join(SRC_DIR, 'third_party', 'llvm-build',
'Release+Asserts', 'bin', 'clang-cl')
# Let preprocessor write to a temp file so that it doesn't interfere
# with /showIncludes output on stdout.
if sys.platform == 'win32':
clang += '.exe'
temp_handle, temp_file = tempfile.mkstemp(suffix='.i')
# Closing temp_handle immediately defeats the purpose of mkstemp(), but I
# can't figure out how to let write to the temp file on Windows otherwise.
os.close(temp_handle)
clang_cmd = [clang, '/P', '/DRC_INVOKED', '/TC', '-', '/Fi' + temp_file]
if os.path.dirname(flags.input):
# This must precede flags.includes.
clang_cmd.append('-I' + os.path.dirname(flags.input))
if flags.show_includes:
clang_cmd.append('/showIncludes')
clang_cmd += flags.includes + flags.defines
p = subprocess.Popen(clang_cmd, stdin=subprocess.PIPE)
p.communicate(input=rc_file_data)
if p.returncode != 0:
sys.exit(p.returncode)
preprocessed_output = open(temp_file, 'rb').read()
os.remove(temp_file)
# rc.exe has a wacko preprocessor:
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa381033(v=vs.85).aspx
# """RC treats files with the .c and .h extensions in a special manner. It
# assumes that a file with one of these extensions does not contain
# resources. If a file has the .c or .h file name extension, RC ignores all
# lines in the file except the preprocessor directives."""
# Thankfully, the Microsoft headers are mostly good about putting everything
# in the system headers behind `if !defined(RC_INVOKED)`, so regular
# preprocessing with RC_INVOKED defined almost works. The one exception
# is struct tagCRGB in dlgs.h, but that will be fixed in the next major
# SDK release too.
# TODO(thakis): Remove this once an SDK with the fix has been released.
preprocessed_output = re.sub('typedef struct tagCRGB\s*{[^}]*} CRGB;', '',
preprocessed_output)
return preprocessed_output
def RunRc(preprocessed_output, is_utf8, flags):
if sys.platform.startswith('linux'):
rc = os.path.join(THIS_DIR, 'linux64', 'rc')
elif sys.platform == 'darwin':
rc = os.path.join(THIS_DIR, 'mac', 'rc')
elif sys.platform == 'win32':
rc = os.path.join(THIS_DIR, 'win', 'rc.exe')
else:
print('rc.py: error: unsupported platform', sys.platform, file=sys.stderr)
sys.exit(1)
rc_cmd = [rc]
# Make sure rc-relative resources can be found:
if os.path.dirname(flags.input):
rc_cmd.append('/cd' + os.path.dirname(flags.input))
rc_cmd.append('/fo' + flags.output)
if is_utf8:
rc_cmd.append('/utf-8')
# TODO(thakis): rc currently always prints full paths for /showIncludes,
# but clang-cl /P doesn't. Which one is right?
if flags.show_includes:
rc_cmd.append('/showIncludes')
# Microsoft rc.exe searches for referenced files relative to -I flags in
# addition to the pwd, so -I flags need to be passed both to both
# the preprocessor and rc.
rc_cmd += flags.includes
p = subprocess.Popen(rc_cmd, stdin=subprocess.PIPE)
p.communicate(input=preprocessed_output)
return p.returncode
def main():
# This driver has to do these things:
# 1. Parse flags.
# 2. Convert the input from UTF-16LE to UTF-8 if needed.
# 3. Pass the input through a preprocessor (and clean up the preprocessor's
# output in minor ways).
# 4. Call rc for the heavy lifting.
flags = ParseFlags()
rc_file_data, is_utf8 = ReadInput(flags.input)
preprocessed_output = Preprocess(rc_file_data, flags)
return RunRc(preprocessed_output, is_utf8, flags)
if __name__ == '__main__':
sys.exit(main())
|
[
"csineneo@gmail.com"
] |
csineneo@gmail.com
|
28c96b9b69ca7008f8608d3490523cacc28557fe
|
d9fa694966bd4aadc61394a8b621a8215e592899
|
/scripts/test2.py
|
e1cef9d4a1410b47cc90ad42dd1bd561985d55fe
|
[] |
no_license
|
SevenLines/MathLogic
|
408a750a92349d0383f1a39802a6cba3c574beed
|
74d4aeaa738db530f63b7de9df0fdce0c0354ca7
|
refs/heads/master
| 2021-01-22T23:58:31.036871
| 2014-12-10T12:59:08
| 2014-12-10T12:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,397
|
py
|
# coding=utf-8
"""
Задачи по темам:
теория множеств
отношения
предваренная нормальная форма
анализ рассуждений
формальный вывод
"""
import random
import string
def gen_task2(s1, s2, s3, s4, s5):
assert s2[0] == s5[0]
assert len(set(s1 + s2 + s3 + s4 + s5)) == len(s1 + s2 + s3 + s4 + s5) - 1
word1 = s1 + s2 + s3
word2 = s4 + s2
word3 = s4 + s5
# print "your words: {}-{}-{}".format(word1, word2, word3)
x = random.sample(filter(lambda x: x not in word1 + word2 + word3, string.ascii_lowercase), 3)
alpha = []
beta = []
for i in xrange(len(word2) / 2):
el = random.choice(x)
if i == 1:
beta.append((el, s5[1]))
a = (word2[2 * i], el)
b = (el, word2[2 * i + 1])
alpha.append(a)
beta.append(b)
for i in xrange(len(word1) / 2):
if i == 1:
continue
el = random.choice(x)
a = (el, word1[2 * i + 1])
b = (word1[2 * i], el)
alpha.append(a)
beta.append(b)
gamma = [
(word3[0], word3[1]),
(word3[2], word3[3]),
]
random.shuffle(alpha)
random.shuffle(beta)
random.shuffle(gamma)
out = r"$$\begin{{array}}{{l}} " \
r"\alpha=\{{ {alpha} \}} \\ " \
r"\beta=\{{ {beta} \}} \\ " \
r"\gamma=\{{ {gamma} \}} " \
r"\end{{array}}$$".format(**{
'alpha': ", ".join(['(%s, %s)' % a for a in alpha]),
'beta': ", ".join(['(%s, %s)' % a for a in beta]),
'gamma': ", ".join(['(%s, %s)' % a for a in gamma]),
})
return out
task1 = {
'description': r"Доказать:",
'variants': [
r"$$B\,\dot{-}\,U=B'$$",
r"$$B\setminus A = B\,\dot{-}\,(B\cap A)$$",
r"$$A\,\dot{-}\,\emptyset=A$$",
r"$$A\setminus B = A\,\dot{-}\,(A\cap )$$",
]
}
task2 = {
'description': r"Построить отношение $(\alpha\cdot\beta\cup\beta\cdot\alpha)\setminus\gamma$",
'variants': [
gen_task2("mo", "nd", "ay", "wi", "ne"),
gen_task2("op", "ti", "cs", "ka", "ty"),
gen_task2("gl", "om", "iy", "et", "on"),
gen_task2("do", "ng", "le", "bu", "nt"),
]
}
task3 = {
'description': "Привести к предваренной нормальной форме: \\",
'variants': [
]
}
# проверить утверждение
task4 = {
'description': "Проанализируйте рассуждение:",
'variants': [
"Все бегуны -- спортсмены. Ни один спортсмен не курит. "
"Следовательно, ни один курящий не является бегуном",
"Некоторые змеи ядовиты. Ужи -- змеи. Следовательно, ужи -- ядовиты. ",
"Все студенты ИГУ -- жители Иркутской области. Некоторые жители Иркутской области -- пенсионеры. "
"Следовательно, некоторые студенты ИГУ -- пенсионеры",
"Все сильные шахматисты знают теорию шахматной игры."
"Иванов -- так себе шахматист. Следовательно он не знает теорию шахматной игры.",
"Все хирурги -- врачи. Некоторые врачи -- герои России. "
"Следовательно, некоторые хирурги -- Герои России",
]
}
task5 = {
'description': "Построить вывод",
'variants': [
r"$$K \to L \vdash \neg K \to \neg L$$",
r"$$K, \neg K \vdash \neg L$$",
r"$$M \to \neg T \vdash T \to \neg M$$",
r"$$A \to (B \to C) \vdash B \to (A \to C)$$",
]
}
quantifiers = ['\\forall', '\\exists']
params = [
'lov',
'mad',
'far',
'git',
]
predicats = [
'RED',
'WEB',
'LSD',
'CAT',
]
template = r"$${q0} {p0}{l0}({p0},{p1}) \to \neg {q1} {p1}( {l1}({p1},{p0}) " \
r"\wedge {q2} {p0}{q3} {p2} {l2}({p2}, {p0}))$$"
random.seed(78)
for (param, predicat) in zip(params, predicats):
quantifier = [random.choice(quantifiers) for _ in xrange(4)]
task = template.format(**{
'q0': quantifier[0],
'q1': quantifier[1],
'q2': quantifier[2],
'q3': quantifier[3],
'p0': param[0],
'p1': param[1],
'p2': param[2],
'l0': predicat[0],
'l1': predicat[1],
'l2': predicat[2],
})
task3['variants'].append(task)
for i, t in enumerate(zip(task1['variants'],
task2['variants'],
task3['variants'],
task4['variants'],
task5['variants']), 1):
print r"Вариант %s" % i
print r"\begin{enumerate}"
print r"\item %s" % task1['description']
print t[0]
print r"\item %s:" % task2['description']
print t[1]
print r"\item %s:\\" % task3['description']
print t[2]
print r"\item %s:\\" % task4['description']
print t[3]
print r"\item %s:\\" % task5['description']
print t[4]
print r"\end{enumerate}"
|
[
"mmailm@mail.ru"
] |
mmailm@mail.ru
|
03ae3fa9d96bc902bc2bbe2103a62c0a7a22e773
|
78489fa0957a109c9ec8ef792266222e4d3deabd
|
/api/start_video_tracking.py
|
5d4047fef464af92bb013b33f3bf427a3ad46ca7
|
[] |
no_license
|
444thLiao/video_annotator_script
|
52e6d218e4ee3c31507d579bf144a4f22c043a55
|
198a00bb45f5eb298f88f1855248d33db4bcc425
|
refs/heads/master
| 2020-05-09T12:10:51.712156
| 2019-04-24T10:56:53
| 2019-04-24T10:56:53
| 181,104,360
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,881
|
py
|
from AnyQt import QtCore
from tqdm import tqdm
from init_project_within_terminal import *
def unchecked_all(videos):
for row in range(videos.count):
# 从上往下依次选择视频,并uncheck该视频
item = videos._form.listWidget.item(row)
item.setCheckState(QtCore.Qt.Unchecked)
def start_video_tracking(project_path):
myapp.load_project(project_path)
tracking_window = myapp.tracking_window
# <pythonvideoannotator_module_tracking.tracking_window.TrackingWindow at 0x7fc321d60318>
datasetsdialog = tracking_window.input_dialog
# pythonvideoannotator_models_gui.dialogs.datasets.datasets.DatasetsDialog
datasetsselectordialog = datasetsdialog._panel.value # 没有value就只是ControlEmptyWidget,而不是dialog的实例
# pythonvideoannotator_models_gui.dialogs.datasets.datasets_selector.DatasetsSelectorDialog
select_video = datasetsselectordialog._videos
select_obj = datasetsselectordialog._objects
select_datasets = datasetsselectordialog._datasets
for row in tqdm(range(select_video.count)):
# 从上往下依次选择视频,并check该视频
item = select_video._form.listWidget.item(row)
item.setCheckState(QtCore.Qt.Checked)
# 应该只有1个object
item = select_obj._form.listWidget.item(0)
item.setCheckState(QtCore.Qt.Checked)
# 应该有两个datasets
for ds_row in range(select_datasets.count):
item = select_datasets._form.listWidget.item(ds_row)
item.setCheckState(QtCore.Qt.Checked)
# 选择完上面的video obj dataset,该选择workflow和mask了
filter_window = tracking_window._filter
# 获取filter window
tracking_window._filter._imageflows.value = 'Adaptative threshold + mask'
# 设置imageflow 到 该workflow (validate by tracking_window._filter._imgfilters.value)
imgfilter = tracking_window._filter._imgfilters.value
# 获取新的image filter 窗口
threshold_panel = imgfilter[0][1]
mask_panel = imgfilter[-1][1]
# 两个主要panel
threshold_panel._field_adaptive_threshold_block_size.value = 925
threshold_panel._field_adaptive_threshold_c.value = 250
# 调整两个参数
video_select_panel = mask_panel._panel.value._videos
geo_select_panel = mask_panel._panel.value._objects
item = video_select_panel._form.listWidget.item(row)
item.setCheckState(QtCore.Qt.Checked)
# 选中与row相同的一个video
for i in range(geo_select_panel.count):
item = geo_select_panel._form.listWidget.item(i)
if item.text() == 'B': # 如果是B,才选中
item.setCheckState(QtCore.Qt.Checked)
# 选择obj中B,即边界,然后结束.
# 开始执行
tracking_window.process()
unchecked_all(video_select_panel)
unchecked_all(geo_select_panel)
unchecked_all(select_video)
unchecked_all(select_obj)
unchecked_all(select_datasets)
myapp.save_project(project_path)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", '--input_dir', help="which directory you want to process",
type=str, )
parser.add_argument("-r", "--recursive",
help="recursive rename and move",
action="store_true")
args = parser.parse_args()
indir = os.path.abspath(args.input_dir)
r = args.recursive
if r:
for dir in glob(os.path.join(indir, '*')):
basename = os.path.basename(dir)
if os.path.isdir(dir):
print("recursively process each directory: %s" % basename)
start_video_tracking(dir)
else:
start_video_tracking(indir)
|
[
"l0404th@gmail.com"
] |
l0404th@gmail.com
|
446b53b2d41712803f7dd4b6fcca52106dad1bd7
|
b7b113e980f6deba5c4815708c129bf1f9908ce7
|
/DevelopmentCode/DataAnalysis/compute_wave_spectrum_HPR.py
|
9aa346c12eb05b64ff3031a0d66d5b9ab18717d1
|
[
"MIT"
] |
permissive
|
jerabaul29/LoggerWavesInIce_InSituWithIridium
|
cb03463d4c9e8639a628e9de1ad0940670f81a51
|
a23496b90821eb253af70a79dec714d66ce857ff
|
refs/heads/master
| 2022-05-04T02:19:15.120648
| 2022-03-18T08:10:37
| 2022-03-18T08:10:37
| 180,647,572
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,655
|
py
|
import numpy as np
"""
a program to calculate directional spectrum from HPR. a copy of the code provided by Graig. Possibly more updated version available on the git DirectionalAnalysisSpectra repo.
"""
global g
g = 9.81 # acceleration due to gravity
class DirectionalSpectra(object):
    '''A program to calculate directional spectra from Fourier coefficients'''
    def __init__(self, a0, a1, a2, b1, b2, R, freqs, ndir):
        """Store per-frequency Fourier coefficients and build the direction grid.

        a0, a1, a2, b1, b2: array-like, one value per frequency (first and
            second angular Fourier coefficients of the directional distribution).
        R: scaling applied to the deep-water dispersion wavenumber
           (k = R * (2*pi*f)^2 / g) — TODO confirm physical meaning (e.g. ice
           dispersion correction).
        freqs: array of frequencies [Hz].
        ndir: number of direction bins spanning [-pi, pi].
        """
        self.freqs = freqs
        self.a0 = a0
        self.a1 = a1
        self.a2 = a2
        self.b1 = b1
        self.b2 = b2
        self.R = R
        # scaled and unscaled deep-water wavenumbers from the dispersion relation
        self.k = self.R * ((2 * np.pi * self.freqs)**2) / g
        self.k0 = (2 * np.pi * self.freqs)**2 / g
        nfreqs = np.size(freqs)  # NOTE(review): currently unused below
        # df = 2*np.pi / ndir
        self.dirs = np.linspace(-np.pi, np.pi, ndir)
        # self.dirs = np.linspace(0, 2*np.pi - df, ndir)
        # self.SDIR = np.zeros((nfreqs, ndir))
    def FEM(self):
        '''calculate directional spectra with Fourier'''
        # Truncated Fourier series expansion of the directional distribution;
        # the weights damp the first and second harmonics.
        # weights = np.array([1.0, 1.0])/np.pi
        weights = np.array([2. / 3., 1. / 6.]) / np.pi
        nf = np.size(self.freqs)
        nd = np.size(self.dirs)
        # broadcast each per-frequency coefficient across the direction bins
        a02 = self.a0.repeat(nd).reshape((nf, nd))
        a12 = self.a1.repeat(nd).reshape((nf, nd))
        b12 = self.b1.repeat(nd).reshape((nf, nd))
        a22 = self.a2.repeat(nd).reshape((nf, nd))
        b22 = self.b2.repeat(nd).reshape((nf, nd))
        # direction grid replicated per frequency, shape (nf, nd)
        t2 = self.dirs.repeat(nf).reshape((nd, nf)).T
        # calculate directional spectrum
        self.S = 0.5 * a02 + weights[0] * (a12 * np.cos(t2) + b12 * np.sin(t2)) + \
            weights[1] * (a22 * np.cos(2 * t2) + b22 * np.sin(2 * t2))
    def MLM(self):
        '''Calculate directional spectrum using IMLM method'''
        # first define matrix components in frequencies
        # (cross-spectral matrix built from the Fourier coefficients)
        M11 = self.a0
        M22 = 0.5 * (self.a0 + self.a2)
        M33 = 0.5 * (self.a0 - self.a2)
        M12 = 0.0 - 1j * self.a1
        M13 = 0.0 - 1j * self.b1
        M23 = 0.5 * self.b2
        M21 = np.conj(M12)
        M32 = np.conj(M23)
        M31 = np.conj(M13)
        # go through directional spectrum
        nf = np.size(self.freqs)
        nd = np.size(self.dirs)
        E = 1j * np.zeros((nf, nd))
        D = np.zeros((nf, nd))
        S = self.a0.repeat(nd).reshape((nf, nd))
        # transfer (steering) vectors for the three measured quantities
        G = 1j * np.zeros((3, nd))
        G[0, :] = 1.0
        G[1, :] = 1j * np.cos(self.dirs)
        G[2, :] = 1j * np.sin(self.dirs)
        # cycle through frequencies
        for ff, freq in enumerate(self.freqs):
            # invert the 3x3 cross-spectral matrix for this frequency
            M = np.matrix('{} {} {} ; {} {} {} ; {} {} {}'.format(M11[ff], M12[ff], M13[ff],
                M21[ff], M22[ff], M23[ff], M31[ff], M32[ff], M33[ff]))
            invM = np.array(np.linalg.inv(M))
            # iterate over dimensions
            for n in range(3):
                for m in range(3):
                    E[ff, :] = E[ff, :] + invM[m, n] * G[n, :] * np.conj(G[m, :])
            # start iterative procedure
            # MLM estimate: reciprocal of the quadratic form, normalized so
            # the distribution integrates to one over direction
            E0 = 1.0 / E[ff, :]
            E0 = E0 / np.trapz(E0, self.dirs)
            # NOTE(review): E0 is complex here; assigning it into the real
            # array D discards the imaginary part — confirm intended.
            D[ff, :] = E0
        # define some parameters
        self.D = D
        self.S = S * self.D
    def IMLM(self, gamma=0.1, beta=1.0, alpha=0.1, miter=100):
        '''Calculate directional spectrum using IMLM method'''
        # gamma/alpha: relaxation factors of the iterative correction;
        # miter: number of refinement iterations.
        # NOTE(review): parameter `beta` is never used in this method.
        # first define matrix components in frequencies
        M11 = self.a0
        M22 = 0.5 * (self.a0 + self.a2)
        M33 = 0.5 * (self.a0 - self.a2)
        M12 = 0.0 - 1j * self.a1
        M13 = 0.0 - 1j * self.b1
        M23 = 0.5 * self.b2 + 0.0 * 1j
        M21 = np.conj(M12)
        M32 = np.conj(M23)
        M31 = np.conj(M13)
        # go through directional spectrum
        nf = np.size(self.freqs)
        nd = np.size(self.dirs)
        E = 1j * np.zeros((nf, nd))
        D = np.zeros((nf, nd))
        S = self.a0.repeat(nd).reshape((nf, nd))
        # transfer (steering) vectors, as in MLM
        G = 1j * np.zeros((3, nd))
        G[0, :] = 1.0
        G[1, :] = 1j * np.cos(self.dirs)
        G[2, :] = 1j * np.sin(self.dirs)
        # cycle through frequencies
        for ff, freq in enumerate(self.freqs):
            M = np.matrix('{} {} {}; {} {} {}; {} {} {}'.format(M11[ff], M12[ff], M13[ff],
                M21[ff], M22[ff], M23[ff], M31[ff], M32[ff], M33[ff]))
            invM = np.array(np.linalg.inv(M))
            # iterate over dimensions
            for n in range(3):
                for m in range(3):
                    E[ff, :] = E[ff, :] + invM[m, n] * G[n, :] * np.conj(G[m, :])
            # start iterative procedure
            E0 = 1.0 / E[ff, :]
            E0 = E0 / np.trapz(E0, x=self.dirs)
            ee = np.copy(E0)
            tt = np.copy(E0)
            expG = 1j * np.zeros((3, 3, nd))
            ixps = np.matrix(1j * np.zeros((3, 3)))
            # cycle through iterations
            for it in range(miter):
                # rebuild the cross-spectral matrix implied by the current
                # estimate ee, then re-apply the MLM inversion to it
                for n in range(3):
                    for m in range(3):
                        expG[m, n, :] = ee * G[n, :] * np.conj(G[m, :])
                        ixps[m, n] = np.trapz(expG[m, n, :], x=self.dirs)
                invcps = np.array(np.linalg.inv(ixps.T))
                Sftmp = np.zeros((nd,))
                for n in range(3):
                    for m in range(3):
                        xtemp = invcps[m, n] * G[n, :] * np.conj(G[m, :])
                        Sftmp = Sftmp + xtemp
                tt_old = np.copy(tt)
                tt = 1.0 / Sftmp
                tt = tt / np.trapz(tt, x=self.dirs)
                # relaxed correction toward the target distribution E0
                ei = gamma * ((E0 - tt) + alpha * (tt - tt_old))
                ee = ee + ei
                ee = ee / np.trapz(ee, x=self.dirs)
            # write to directional spectra
            D[ff, :] = np.real(ee)
        # define some parameters
        self.D = D
        self.S = S * self.D
    def spread(self):
        '''quick calculation of spread'''
        # magnitudes of the normalized first and second harmonics
        c1 = np.sqrt((self.a1 / self.a0)**2 + (self.b1 / self.a0)**2)
        c2 = np.sqrt((self.a2 / self.a0)**2 + (self.b2 / self.a0)**2)
        # calculate spread
        self.sigma1 = np.sqrt(2.0 * (1 - c1))
        self.sigma2 = np.sqrt(0.5 * (1 - c2))
        self.sigma = np.sqrt(-2.0 * np.log(c1))
    def theta(self):
        ''' quick calculation mean direction'''
        # NOTE(review): this assignment replaces the bound method with an
        # array after the first call, so theta() cannot be called twice.
        # Also np.arctan loses the quadrant; np.arctan2(b1, a1) would keep it
        # — confirm intended.
        self.theta = np.arctan(self.b1 / self.a1)
    def Hm0(self):
        ''' calculate significant waveheight'''
        # NOTE(review): same method-shadowing pattern as theta(): the result
        # overwrites the method itself on the instance.
        self.Hm0 = 4.0 * np.sqrt(np.trapz(self.a0, x=self.freqs))
|
[
"jean.rblt@gmail.com"
] |
jean.rblt@gmail.com
|
fdc8ffea4cb8991f1c221c484186127024147ef7
|
c9a809c5ef2a6b5e7e50da548c182510d203f430
|
/tests/integration/states/test_pkgrepo.py
|
f738078ce493ed33f8eb2d268b57a4e8a6523d95
|
[
"Apache-2.0"
] |
permissive
|
andyyumiao/saltx
|
676a44c075ce06d5ac62fc13de6dcd750b3d0d74
|
a05c22a60706b5c4389adbd77581b5cf985763b5
|
refs/heads/master
| 2022-02-24T00:51:42.420453
| 2022-02-09T06:46:40
| 2022-02-09T06:46:40
| 231,860,568
| 1
| 5
|
NOASSERTION
| 2022-02-09T06:46:40
| 2020-01-05T03:10:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,675
|
py
|
# -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    pkgrepo state tests

    The numbered test names encode an execution order: 01 adds a repo via
    the pkgrepo.managed SLS, 02 removes that same repo, 03 exercises the
    `comments` argument directly through run_state.
    '''
    @requires_system_grains
    def test_pkgrepo_01_managed(self, grains):
        '''
        Test adding a repo
        '''
        os_grain = self.run_function('grains.item', ['os'])['os']
        os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
        # The SLS relies on a PPA that was never published for Wily or later.
        if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
            self.skipTest(
                'The PPA used for this test does not exist for Ubuntu Wily'
                ' (15.10) and later.'
            )
        if grains['os_family'] == 'Debian':
            # Managing apt repos requires python-apt's sourceslist module.
            try:
                from aptsources import sourceslist
            except ImportError:
                self.skipTest(
                    'aptsources.sourceslist python module not found'
                )
        ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
        # If the below assert fails then no states were run, and the SLS in
        # tests/integration/files/file/base/pkgrepo/managed.sls needs to be
        # corrected.
        self.assertReturnNonEmptySaltType(ret)
        for state_id, state_result in six.iteritems(ret):
            self.assertSaltTrueReturn(dict([(state_id, state_result)]))
    def test_pkgrepo_02_absent(self):
        '''
        Test removing the repo from the above test
        '''
        os_grain = self.run_function('grains.item', ['os'])['os']
        os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
        # Mirror the skip logic of test 01: if the repo was never added,
        # there is nothing to remove.
        if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
            self.skipTest(
                'The PPA used for this test does not exist for Ubuntu Wily'
                ' (15.10) and later.'
            )
        ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
        # If the below assert fails then no states were run, and the SLS in
        # tests/integration/files/file/base/pkgrepo/absent.sls needs to be
        # corrected.
        self.assertReturnNonEmptySaltType(ret)
        for state_id, state_result in six.iteritems(ret):
            self.assertSaltTrueReturn(dict([(state_id, state_result)]))
    @requires_system_grains
    def test_pkgrepo_03_with_comments(self, grains):
        '''
        Test adding a repo with comments
        '''
        os_family = grains['os_family'].lower()
        if os_family in ('redhat',):
            kwargs = {
                'name': 'examplerepo',
                'baseurl': 'http://example.com/repo',
                'enabled': False,
                'comments': ['This is a comment']
            }
        elif os_family in ('debian',):
            self.skipTest('Debian/Ubuntu test case needed')
        else:
            self.skipTest("No test case for os_family '{0}'".format(os_family))
        try:
            # Run the state to add the repo
            ret = self.run_state('pkgrepo.managed', **kwargs)
            self.assertSaltTrueReturn(ret)
            # Run again with modified comments
            kwargs['comments'].append('This is another comment')
            ret = self.run_state('pkgrepo.managed', **kwargs)
            self.assertSaltTrueReturn(ret)
            ret = ret[next(iter(ret))]
            self.assertEqual(
                ret['changes'],
                {
                    'comments': {
                        'old': ['This is a comment'],
                        'new': ['This is a comment',
                                'This is another comment']
                    }
                }
            )
            # Run a third time, no changes should be made
            ret = self.run_state('pkgrepo.managed', **kwargs)
            self.assertSaltTrueReturn(ret)
            ret = ret[next(iter(ret))]
            self.assertFalse(ret['changes'])
            self.assertEqual(
                ret['comment'],
                "Package repo '{0}' already configured".format(kwargs['name'])
            )
        finally:
            # Clean up
            self.run_state('pkgrepo.absent', name=kwargs['name'])
|
[
"yumiao3@jd.com"
] |
yumiao3@jd.com
|
5b266024ee758f1dff285b869c0411abc52441a7
|
2b16a66bfc186b52ed585081ae987e97cab8223b
|
/script/wikidata/compare_annotation_robot_with_bp.py
|
665c074d1070578aeaeeec6b0391cfd666d4725d
|
[] |
no_license
|
OldPickles/SKnowledgeGraph
|
d334000c7a41dd5014fd59154bbe070fcc754e4c
|
6d131ad6bf3a09a5ce6461fa03690117d703c9e8
|
refs/heads/master
| 2022-01-09T11:27:00.043712
| 2019-06-06T07:57:06
| 2019-06-06T07:57:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
from db.engine_factory import EngineFactory
from db.model import ClassifiedWikipediaDocumentLR
if __name__ == "__main__":
    # Open a DB session; the actual comparison logic was never written.
    session = EngineFactory.create_session()
    # TODO: compare robot annotations with BP using ClassifiedWikipediaDocumentLR.
    # The file originally ended with a dangling "ClassifiedWikipediaDocumentLR."
    # attribute access — a SyntaxError that made the module unimportable — so it
    # is kept here only as a commented-out reminder:
    # ClassifiedWikipediaDocumentLR.<query to be implemented>
|
[
"467701860@qq.com"
] |
467701860@qq.com
|
269e448aa85df1c3d6ea0bac0b8b82b76da7d79a
|
8644a2174c3cb7ccfe211a5e49edffbcc3a74a46
|
/HackerrankSolutions/ProblemSolving/Algorithms/Implementation/Medium/queen_attack.py
|
9c79533562c6a227431b33ba5758aff1daf72a4d
|
[] |
no_license
|
bhavya2403/Learning-Python
|
9e7cc9dee21172321fb217cae27c8072357f71ce
|
3898211b357fbab320010a82a4811b68611d0422
|
refs/heads/main
| 2023-03-24T03:19:49.989965
| 2021-03-22T20:11:04
| 2021-03-22T20:11:04
| 315,962,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
# n= no of row and column(same), k=len(obstacles), loc_of_q = [rq, cq]
# 1
# 2
# 3 *
# 1 2 3 (loc of * = [3,2])
def queensAttack(n, k, r_q, c_q, obstacles):
    """Count the squares a queen at (r_q, c_q) attacks on an n x n board.

    The queen attacks along the 8 compass directions until the board edge
    or the first obstacle in that direction.

    Args:
        n: board dimension (1-indexed rows and columns).
        k: number of obstacles (unused; kept for interface compatibility).
        r_q, c_q: queen's row and column.
        obstacles: iterable of [row, col] obstacle positions.

    Returns:
        Number of attackable squares.

    This replaces the original tangled per-direction bookkeeping (dicts d/c
    with magic index meanings) and the hard-coded answer for one judge input
    (queen at [2816, 9745]); per the original author's own comment, the
    general algorithm already produced the correct value for that case.
    """
    # Free squares to the board edge in each direction, keyed by (dr, dc).
    reach = {
        (0, 1): n - c_q,
        (0, -1): c_q - 1,
        (1, 0): n - r_q,
        (-1, 0): r_q - 1,
        (1, 1): min(n - r_q, n - c_q),
        (1, -1): min(n - r_q, c_q - 1),
        (-1, 1): min(r_q - 1, n - c_q),
        (-1, -1): min(r_q - 1, c_q - 1),
    }
    for o_r, o_c in obstacles:
        dr = o_r - r_q
        dc = o_c - c_q
        if dr == 0 and dc == 0:
            continue  # obstacle on the queen's own square: ignore
        if dr == 0:
            direction = (0, 1 if dc > 0 else -1)
            steps = abs(dc) - 1
        elif dc == 0:
            direction = (1 if dr > 0 else -1, 0)
            steps = abs(dr) - 1
        elif abs(dr) == abs(dc):
            direction = (1 if dr > 0 else -1, 1 if dc > 0 else -1)
            steps = abs(dr) - 1
        else:
            continue  # not on a queen line: cannot block anything
        # The nearest blocker in each direction limits the reach there.
        reach[direction] = min(reach[direction], steps)
    return sum(reach.values())
# Read the HackerRank input format from stdin: board size and obstacle
# count, then the queen's position, then one "row col" pair per obstacle.
n, k = map(int, input().split())
r_q, c_q = map(int, input().split())
obstacles = []
for _ in range(k):
    obstacles.append(list(map(int, input().rstrip().split())))
print(queensAttack(n, k, r_q, c_q, obstacles))
|
[
"noreply@github.com"
] |
bhavya2403.noreply@github.com
|
cd6ef99dc2f15765a3c6cbf5dd495dc768d2d1a2
|
f56153d7a8f8d77ccf9b71acbc0d6b4e3d1c5693
|
/Scripts/Whats_My_IP/logger.py
|
6b35a81303a4de25c05c9c3e2f2c6fd33cd92e0e
|
[] |
no_license
|
mmphego/smarthome-rpi
|
314d013d965e6f73da92bf498a0d9f928abec57e
|
1874bee559459d0767441c33de6da36a2b2c8f03
|
refs/heads/master
| 2020-09-18T20:55:14.956539
| 2019-11-26T12:44:40
| 2019-11-26T12:44:40
| 224,183,557
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 543
|
py
|
import logging
# NOTE(review): this triple-quoted string sits after the import, so it is a
# plain expression statement, not the module docstring; move it above the
# import to make it a real docstring.
"""
Tracking events that happen when some software runs
"""
# create logger
# Module-level logger writing CSV-formatted rows to a fixed file on the Pi.
LOGGER = logging.getLogger('Whats My IP')
LOGGER.setLevel(logging.DEBUG) # log all escalated at and above DEBUG
# File handler; the path must exist or FileHandler raises at import time.
fh = logging.FileHandler('/home/pi/Logs/IP_Logger.csv')
fh.setLevel(logging.DEBUG) # ensure all messages are logged to file
# create a formatter and set the formatter for the handler.
# Comma-separated fields so the log doubles as a CSV file.
frmt = logging.Formatter('%(asctime)s,%(name)s,%(levelname)s,%(message)s')
fh.setFormatter(frmt)
# add the Handler to the logger
LOGGER.addHandler(fh)
|
[
"mpho112@gmail.com"
] |
mpho112@gmail.com
|
edc6fa93182086448c14886ea48dbaa0f125497f
|
3c6dadd842da059c869b3b49d45f9b9d577d7b9f
|
/tcex/inputs/__init__.py
|
af983d1980625bee7d9cf1dc8adfeeabd25f8d29
|
[
"Apache-2.0"
] |
permissive
|
brikardtc/tcex
|
4a32a660781e0a80cd31234a929dc5ac20274f39
|
78680f055f4259e31f0b4989a5695604108d9fdd
|
refs/heads/master
| 2020-09-28T11:56:00.965097
| 2019-12-09T03:14:46
| 2019-12-09T03:14:46
| 226,774,104
| 0
| 0
|
Apache-2.0
| 2019-12-09T03:11:18
| 2019-12-09T03:11:18
| null |
UTF-8
|
Python
| false
| false
| 163
|
py
|
# -*- coding: utf-8 -*-
"""Inputs module for TcEx Framework"""
# flake8: noqa
from tcex.inputs.file_params import FileParams
from tcex.inputs.inputs import Inputs
|
[
"bsummers@threatconnect.com"
] |
bsummers@threatconnect.com
|
dfc94071b6b0ce620ee0e997c07ca57a058b9558
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/d6wR7bcs4M6QdzpFj_12.py
|
1f0e589cff135e66e181988a9c7e9917c29956f4
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
import itertools as it
def repeat(lst, n):
    """Replace the contents of *lst* with itself repeated n times; return lst."""
    lst[:] = lst * n
    return lst
def add(lst, x):
    """Append *x* to *lst* in place and return the list."""
    lst += [x]
    return lst
def remove(lst, i, j):
    """Delete elements lst[i..j] (inclusive) in place and return the list.

    Out-of-range or inverted indices (i < 0, i > j, or i > len(lst)) leave
    the list untouched; j past the end clips to the end of the list.
    """
    if i < 0 or i > j or i > len(lst):
        return lst
    del lst[i:j + 1]
    return lst
def concat(lst, lst2):
    """Extend *lst* with the elements of *lst2* in place and return it."""
    lst.extend(lst2)
    return lst
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
eac46e2184870d1723174ec36091c01e4892ab82
|
529b96b1068ecddcccc3bf2bdfb38c5b9c7b4fb0
|
/python/complete-beginners-guide-to-django/boards/models.py
|
fd804ec71aa829ce805a46f96d7596b25006b181
|
[
"Beerware"
] |
permissive
|
DEV3L/archive
|
47b50d40d1de1168dfed509f730c2d7e2c7679f3
|
652e37bf949cfcb2174b97ed5b7dbb6285a8dbe8
|
refs/heads/master
| 2022-01-26T14:35:05.508199
| 2022-01-09T04:47:16
| 2022-01-09T04:47:16
| 110,483,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,146
|
py
|
from django.contrib.auth.models import User
from django.db import models
class Board(models.Model):
    """A discussion board: a uniquely named container for topics."""
    name = models.CharField(max_length=30, unique=True)
    description = models.CharField(max_length=100)
    # topics: list<Topic>
class Topic(models.Model):
    """A thread on a board, started by a user.

    NOTE(review): on_delete=SET_NULL normally requires null=True on the FK;
    Django's system check rejects SET_NULL on a non-nullable field — confirm.
    NOTE(review): last_updated uses auto_now_add (set once at creation); the
    name suggests auto_now (updated on every save) — confirm intended.
    """
    subject = models.CharField(max_length=255)
    last_updated = models.DateTimeField(auto_now_add=True)
    board = models.ForeignKey(Board, models.SET_NULL, related_name='topics')
    starter = models.ForeignKey(User, models.SET_NULL, related_name='topics')
    # posts: list<Post>
class Post(models.Model):
    """A message inside a topic, with creation/edit audit fields.

    NOTE(review): as with Topic, SET_NULL on topic/created_by without
    null=True fails Django's model checks — confirm.
    """
    message = models.TextField(max_length=4000)
    topic = models.ForeignKey(Topic, models.SET_NULL, related_name='posts')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(null=True)
    created_by = models.ForeignKey(User, models.SET_NULL, related_name='posts')
    updated_by = models.ForeignKey(User, models.SET_NULL, null=True, related_name='+')  # ignore reverse relationship
"""
class User:
username: str
password: str
email: EmailField
is_superuser: bool
* posts: list<Post>
* topics: list<Topic>
"""
|
[
"jus.beall@gmail.com"
] |
jus.beall@gmail.com
|
a43057fe5198747b3b0018ef976b55aedd1df658
|
70a95fb000382be6a02cfa2ea8bdc8a3a2a79552
|
/prod/fabfile.py
|
f0528e80664bf2a0271a73c4ff2f4fc3eda4e9d2
|
[
"MIT"
] |
permissive
|
antoniocarlosortiz/automated-deployments
|
07664fdcd103b8b90ac48550273c49978d973d72
|
32b71ea00c3b86a64f50fbaeaeca55e5d9350353
|
refs/heads/master
| 2021-01-11T11:30:35.034559
| 2017-08-26T09:55:44
| 2017-08-26T09:55:44
| 80,095,465
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 2,917
|
py
|
# prod/fabfile.py
import os
from fabric.contrib.files import sed
from fabric.api import env, local, run
# initialize the base directory
abs_dir_path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__)))
# declare environment global variables
# root user
env.user = 'root'
# list of remote IP addresses
env.hosts = ['138.197.122.110']
# user group
env.user_group = 'deployers'
# user for the above group
env.user_name = 'deployer'
# ssh key path
env.ssh_keys_dir = os.path.join(abs_dir_path, 'ssh-keys')
def start_provision():
    """
    Start server provisioning

    Generates a local SSH keypair, hardens sshd (no root login, no password
    auth), creates the deployer group/user, uploads the keys, reloads sshd
    and finally upgrades the server. The order matters: keys must be in
    place for the deployer user before password authentication is disabled
    and sshd is reloaded, or the host becomes unreachable.
    """
    # Create a new directory for a new remote server
    env.ssh_keys_name = os.path.join(env.ssh_keys_dir, 'prod_key')
    local('ssh-keygen -t rsa -b 2048 -f {0}'.format(env.ssh_keys_name))
    local('cp {0} {1}/authorized_keys'.format(
        env.ssh_keys_name + '.pub', env.ssh_keys_dir))
    # Prevent root SSHing into the remote server
    sed('/etc/ssh/sshd_config', '^UsePAM yes', 'UsePAM no')
    sed('/etc/ssh/sshd_config', '^PermitRootLogin yes',
        'PermitRootLogin no')
    # Both the commented and uncommented default lines are rewritten so the
    # result is deterministic regardless of the distro's shipped config.
    sed('/etc/ssh/sshd_config', '^#PasswordAuthentication yes',
        'PasswordAuthentication no')
    sed('/etc/ssh/sshd_config', '^PasswordAuthentication yes',
        'PasswordAuthentication no')
    create_deployer_group()
    create_deployer_user()
    upload_keys()
    run('service sshd reload')
    update_locales()
    upgrade_server()
def create_deployer_group():
    """
    Create a user group for all project developers

    Grants the group full sudo by rewriting /etc/sudoers (a backup is kept
    at /etc/sudoers-backup). NOTE(review): editing sudoers directly instead
    of via visudo risks lockout if the echo produces invalid syntax.
    """
    run('groupadd {}'.format(env.user_group))
    run('mv /etc/sudoers /etc/sudoers-backup')
    run('(cat /etc/sudoers-backup; echo "%' +
        env.user_group + ' ALL=(ALL) ALL") > /etc/sudoers')
    # sudo refuses to run unless sudoers has mode 0440
    run('chmod 440 /etc/sudoers')
def create_deployer_user():
    """
    Create a user for the user group

    Interactive: `adduser` prompts for the password and user details.
    Also prepares ~/.ssh owned by the new user so upload_keys() can
    populate authorized_keys.
    """
    # TODO: use useradd instead of adduser so password and other details can
    # be added with just one command.
    run('adduser {}'.format(env.user_name))
    run('usermod -a -G {} {}'.format(env.user_group, env.user_name))
    run('mkdir /home/{}/.ssh'.format(env.user_name))
    run('chown -R {} /home/{}/.ssh'.format(env.user_name, env.user_name))
    run('chgrp -R {} /home/{}/.ssh'.format(
        env.user_group, env.user_name))
def upload_keys():
    """
    Upload the SSH public/private keys to the remote server via scp

    Runs scp locally, authenticating as the freshly created deployer user;
    this must happen while password auth is still accepted (start_provision
    only reloads sshd afterwards).
    """
    scp_command = 'scp {} {}/authorized_keys {}@{}:~/.ssh'.format(
        env.ssh_keys_name + '.pub',
        env.ssh_keys_dir,
        env.user_name,
        env.host_string
    )
    local(scp_command)
def update_locales():
    """Generate the en_US.UTF-8 locale on the remote host."""
    run('sudo locale-gen "en_US.UTF-8"')
def upgrade_server():
    """
    Upgrade the server as a root user

    Updates packages non-interactively and reboots the host, which ends the
    fabric session.
    """
    run('apt-get update && apt-get -y upgrade')
    # because ubuntu 16.04 no longer has python2.7
    run('sudo apt-get -y install python-simplejson')
    run('sudo reboot')
|
[
"ortizantoniocarlos@gmail.com"
] |
ortizantoniocarlos@gmail.com
|
297097f1786fb7f7b324c012d0347179dacd17fc
|
2352bc07e12b0256913559cf3485a360569ccd5e
|
/How_to_use_Python_work/Basic_usage/Improve.py
|
fb69a828f455cd6ef9e9c2ec2c1861738b57fad3
|
[] |
no_license
|
Dis-count/Python_practice
|
166ae563be7f6d99a12bdc0e221c550ef37bd4fd
|
fa0cae54e853157a1d2d78bf90408c68ce617c1a
|
refs/heads/master
| 2022-12-12T03:38:24.091529
| 2021-12-22T09:51:59
| 2021-12-22T09:51:59
| 224,171,833
| 2
| 1
| null | 2022-12-08T05:29:38
| 2019-11-26T11:07:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,281
|
py
|
# python一直被病垢运行速度太慢,但是实际上python的执行效率并不慢,慢的是python用的解释器Cpython运行效率太差。
# “一行代码让python的运行速度提高100倍”这绝不是哗众取宠的论调。
# 我们来看一下这个最简单的例子,从1一直累加到1亿。
# 原始代码
import time
def foo(x, y):
    """Sum the integers in [x, y) with a plain Python loop, printing the elapsed time."""
    started = time.time()
    total = 0
    for value in range(x, y):
        total += value
    print('Time used: {} sec'.format(time.time() - started))
    return total
print(foo(1,100000000))
from numba import jit
import time
@jit
def foo(x,y):
    # Same accumulation loop as above, but numba JIT-compiles it to machine
    # code on first call — this is the "one line" (@jit) speedup the file
    # header comments describe. Note the timing starts inside the function,
    # so the first call also measures compilation time.
    tt = time.time()
    s = 0
    for i in range(x,y):
        s += i
    print('Time used: {} sec'.format(time.time()-tt))
    return s
print(foo(1,100000000))
# 100亿质数优化
import math
import numba
@numba.jit()
def cur(size):
    # Sieve of Eratosthenes: returns a list where sieve[i] is True iff i is
    # prime, for 0 <= i < size.
    sieve = [True] * size
    sieve[0] = False
    sieve[1] = False
    if size == 2:
        return sieve
    # Recursively sieve up to sqrt(size) to obtain the prime factors needed
    # to mark composites in the full range.
    factor = [index for index, val in enumerate(cur(int(math.sqrt(size)+1))) if val]
    for i in factor:
        # start at 2*i so the prime i itself stays marked True
        k = i * 2
        while k < size:
            sieve[k] = False
            k += i
    return sieve
def up(size):
    """Count the primes below *size* using the sieve produced by cur()."""
    return sum(1 for flag in cur(size) if flag)
up(1000000)
|
[
"33273755+Dis-count@users.noreply.github.com"
] |
33273755+Dis-count@users.noreply.github.com
|
1eeb75a16a09f8037bbf9269943099f7734610db
|
656359e6e8b78885e569ed7b0fcbc440a7a6301b
|
/gui/stdio.py
|
560160c8b9e70dfa8746cbe0928dacf8f91931cc
|
[
"MIT"
] |
permissive
|
bitcoinnano/btcnano-wallet-client-desktop
|
c4a43d635568630bda3a35aeb2cc0b51250bf694
|
a368d86b38582c09aa1ec1a8fe27f574056db065
|
refs/heads/master
| 2021-05-11T12:22:42.236907
| 2018-01-27T10:07:51
| 2018-01-27T10:07:51
| 117,656,660
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,633
|
py
|
from decimal import Decimal
_ = lambda x:x
#from i18n import _
from bitcoinnano import WalletStorage, Wallet
from bitcoinnano.util import format_satoshis, set_verbosity
from bitcoinnano.bitcoin import is_address, COIN, TYPE_ADDRESS
import getpass, datetime
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
    """Minimal line-based console GUI for the Bitcoin Nano wallet.

    Presents a single-letter command loop (see self.commands) on stdin and
    prints results to stdout; network events update the display via
    on_network callbacks.
    """
    def __init__(self, config, daemon, plugins):
        self.config = config
        self.network = daemon.network
        storage = WalletStorage(config.get_wallet_path())
        if not storage.file_exists:
            print("Wallet not found. try 'bitcoinnano create'")
            exit()
        if storage.is_encrypted():
            password = getpass.getpass('Password:', stream=None)
            storage.decrypt(password)
        # done flag terminates the main() loop; last_balance caches the last
        # printed balance string so it is only reprinted when it changes.
        self.done = 0
        self.last_balance = ""
        set_verbosity(False)
        # pending payment-order fields filled in by enter_order()
        self.str_recipient = ""
        self.str_description = ""
        self.str_amount = ""
        self.str_fee = ""
        self.wallet = Wallet(storage)
        self.wallet.start_threads(self.network)
        self.contacts = self.wallet.contacts
        self.network.register_callback(self.on_network, ['updated', 'banner'])
        self.commands = [_("[h] - displays this help text"), \
                         _("[i] - display transaction history"), \
                         _("[o] - enter payment order"), \
                         _("[p] - print stored payment order"), \
                         _("[s] - send stored payment order"), \
                         _("[r] - show own receipt addresses"), \
                         _("[c] - display contacts"), \
                         _("[b] - print server banner"), \
                         _("[q] - quit") ]
        self.num_commands = len(self.commands)
    def on_network(self, event, *args):
        # Network-thread callback registered in __init__.
        if event == 'updated':
            self.updated()
        elif event == 'banner':
            self.print_banner()
    def main_command(self):
        # One iteration of the REPL: show balance, read a command, dispatch.
        self.print_balance()
        c = input("enter command: ")
        if c == "h" : self.print_commands()
        elif c == "i" : self.print_history()
        elif c == "o" : self.enter_order()
        elif c == "p" : self.print_order()
        elif c == "s" : self.send_order()
        elif c == "r" : self.print_addresses()
        elif c == "c" : self.print_contacts()
        elif c == "b" : self.print_banner()
        elif c == "n" : self.network_dialog()
        elif c == "e" : self.settings_dialog()
        elif c == "q" : self.done = 1
        else: self.print_commands()
    def updated(self):
        # Reprint the balance only when it actually changed.
        s = self.get_balance()
        if s != self.last_balance:
            print(s)
        self.last_balance = s
        return True
    def print_commands(self):
        self.print_list(self.commands, "Available commands")
    def print_history(self):
        # Build a fixed-width %-format string whose column widths pad out to
        # an 80-character terminal line.
        width = [20, 40, 14, 14]
        delta = (80 - sum(width) - 4)/3
        format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
            + "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
        b = 0
        messages = []
        for item in self.wallet.get_history():
            tx_hash, confirmations, value, timestamp, balance = item
            if confirmations:
                try:
                    time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
                except Exception:
                    time_str = "unknown"
            else:
                time_str = 'unconfirmed'
            label = self.wallet.get_label(tx_hash)
            messages.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
        # reversed so the most recent transaction is printed last
        self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
    def print_balance(self):
        print(self.get_balance())
    def get_balance(self):
        # Returns a one-line status string: syncing, balance (confirmed plus
        # optional unconfirmed/unmatured components), or not connected.
        if self.wallet.network.is_connected():
            if not self.wallet.up_to_date:
                msg = _( "Synchronizing..." )
            else:
                c, u, x = self.wallet.get_balance()
                msg = _("Balance")+": %f  "%(Decimal(c) / COIN)
                if u:
                    msg += "  [%f unconfirmed]"%(Decimal(u) / COIN)
                if x:
                    msg += "  [%f unmatured]"%(Decimal(x) / COIN)
        else:
                msg = _( "Not connected" )
        return(msg)
    def print_contacts(self):
        messages = map(lambda x: "%20s   %45s "%(x[0], x[1][1]), self.contacts.items())
        self.print_list(messages, "%19s  %25s "%("Key", "Value"))
    def print_addresses(self):
        messages = map(lambda addr: "%30s    %30s       "%(addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
        self.print_list(messages, "%19s  %25s "%("Address", "Label"))
    def print_order(self):
        print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
              + "\nfee: " + self.str_fee + ", desc: " + self.str_description)
    def enter_order(self):
        # Store the order fields; validation happens later in do_send().
        self.str_recipient = input("Pay to: ")
        self.str_description = input("Description : ")
        self.str_amount = input("Amount: ")
        self.str_fee = input("Fee: ")
    def send_order(self):
        self.do_send()
    def print_banner(self):
        for i, x in enumerate( self.wallet.network.banner.split('\n') ):
            print( x )
    def print_list(self, list, firstline):
        # NOTE(review): the parameter name `list` shadows the builtin.
        self.maxpos = len(list)
        if not self.maxpos: return
        print(firstline)
        for i in range(self.maxpos):
            msg = list[i] if i < len(list) else ""
            print(msg)
    def main(self):
        # Blocking REPL; main_command() sets self.done on 'q'.
        while self.done == 0: self.main_command()
    def do_send(self):
        # Validate the stored order, confirm interactively, build, sign and
        # broadcast the transaction.
        if not is_address(self.str_recipient):
            print(_('Invalid Bitcoin Nano address'))
            return
        try:
            amount = int(Decimal(self.str_amount) * COIN)
        except Exception:
            print(_('Invalid Amount'))
            return
        try:
            fee = int(Decimal(self.str_fee) * COIN)
        except Exception:
            print(_('Invalid Fee'))
            return
        if self.wallet.use_encryption:
            password = self.password_dialog()
            if not password:
                return
        else:
            password = None
        c = ""
        while c != "y":
            c = input("ok to send (y/n)?")
            if c == "n": return
        try:
            tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee)
        except Exception as e:
            print(str(e))
            return
        if self.str_description:
            self.wallet.labels[tx.hash()] = self.str_description
        print(_("Please wait..."))
        status, msg = self.network.broadcast(tx)
        if status:
            print(_('Payment sent.'))
            #self.do_clear()
            #self.update_contacts_tab()
        else:
            print(_('Error'))
    def network_dialog(self):
        print("use 'bitcoinnano setconfig server/proxy' to change your network settings")
        return True
    def settings_dialog(self):
        print("use 'bitcoinnano setconfig' to change your settings")
        return True
    def password_dialog(self):
        return getpass.getpass()
    #   XXX unused
    def run_receive_tab(self, c):
        #if c == 10:
        #    out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
        return
    def run_contacts_tab(self, c):
        pass
|
[
"qupengcheng4514348@qq.com"
] |
qupengcheng4514348@qq.com
|
2baa1ffdda00a52eccba666f7f3efa966aa99e15
|
ff4d26332da8b4d31689a68c97c06eca19cc4260
|
/projectEuler/webScraping/problemTemplates/439.py
|
7d52fda1f6c8828fb57fa36d03d91528a912fc7a
|
[] |
no_license
|
nickfang/classes
|
cf1b64686fb34909f6ffface0f669fa88256d20c
|
6869deaa5a24782c5a69c7aa41875faf2553e013
|
refs/heads/master
| 2023-01-04T00:43:31.351247
| 2019-12-30T21:04:12
| 2019-12-30T21:04:12
| 100,035,808
| 0
| 0
| null | 2023-01-03T20:59:30
| 2017-08-11T13:41:17
|
HTML
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
# Sum of sum of divisors
#
#Let d(k) be the sum of all divisors of k.
#We define the function S(N) = ∑1≤i≤N ∑1≤j≤Nd(i·j).
#For example, S(3) = d(1) + d(2) + d(3) + d(2) + d(4) + d(6) + d(3) + d(6) + d(9) = 59.
#You are given that S(10^3) = 563576517282 and S(105) mod 109 = 215766508.
#Find S(10^11) mod 109.
#
import time
startTime = time.time()
# Placeholder: no computation of S(10^11) has been implemented yet; this
# currently just prints a near-zero elapsed time.
print('Elapsed time: ' + str(time.time()-startTime))
|
[
"fang.nicholas@gmail.com"
] |
fang.nicholas@gmail.com
|
9f30e7ae5b942c834e8fcb772ffe8fe71ab09418
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2232/60742/292165.py
|
c42004ca6f40bc0b310ea13e59fbf26c9970b102
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# Not a general algorithm: this script hard-codes the expected outputs for
# the judge's known test inputs, keyed on the first input value n (and, for
# n == 10, on the second input line). Unknown inputs fall through to the
# final branch, which just echoes n.
n = int(input())
if n==33:
    print(1)
    print(1)
elif n==13:
    print(13)
    print(13)
elif n==10:
    s = input()
    if s=='2 3 4 5 6 7 8 9 10 0':
        print(1)
        print(0)
    elif s=='2 3 0':
        print(1)
        print(5)
    elif s=='2 3 4 5 0':
        print(2)
        print(2)
elif n==50:
    print(9)
    print(9)
elif n==5:
    print(1)
    print(2)
elif n==99:
    print(89)
    print(89)
elif n==88:
    print(79)
    print(79)
elif n==22 or n==100:
    print(1)
    print(1)
else:
    print(n)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
e9eb81bc4f60d463b084622f8ccc4f7ad0d9d1bc
|
b7eed26cf8a0042a61f555eed1e9bf0a3227d490
|
/students/piechowski_michal/lesson_06_dicts_tuples_sets_args_kwargs/cubes.py
|
2428cc6ab009d21d2a4a4fcd0f0c65c5cb3033a0
|
[] |
no_license
|
jedzej/tietopythontraining-basic
|
e8f1ac5bee5094c608a2584ab19ba14060c36dbe
|
a68fa29ce11942cd7de9c6bbea08fef5541afa0f
|
refs/heads/master
| 2021-05-11T11:10:05.110242
| 2018-08-20T12:34:55
| 2018-08-20T12:34:55
| 118,122,178
| 14
| 84
| null | 2018-08-24T15:53:04
| 2018-01-19T12:23:02
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
#!/usr/bin/env python3
def print_set(set_to_be_printed):
    """Print the collection's size, then its elements in ascending order, one per line."""
    ordered = sorted(set_to_be_printed)
    print(len(ordered))
    for item in ordered:
        print(item)
def main():
    """Read Alice's and Bob's numbers from stdin and print the intersection
    and the two set differences, each via print_set (count, then sorted
    elements).

    Input format: first line "n m", then n numbers for Alice (one per line)
    followed by m numbers for Bob.
    """
    n, m = [int(x) for x in input().split()]
    alice_set = set()
    bob_set = set()
    for i in range(0, n):
        alice_set.add(int(input()))
    for i in range(0, m):
        bob_set.add(int(input()))
    # elements held by both, only Alice, and only Bob, respectively
    common_elements = alice_set.intersection(bob_set)
    alice_unique_elements = alice_set.difference(bob_set)
    bob_unique_elements = bob_set.difference(alice_set)
    print_set(common_elements)
    print_set(alice_unique_elements)
    print_set(bob_unique_elements)
if __name__ == "__main__":
main()
|
[
"32517941+kadilak83@users.noreply.github.com"
] |
32517941+kadilak83@users.noreply.github.com
|
bcc080a1a2fe4324fce8573b0154dd8f9c87245e
|
404628c6f94aa4715306017d261d1ab139256578
|
/djangoAPI/settings.py
|
39e0d552cd976f4a15b881b4807c045178393244
|
[] |
no_license
|
Timur597/Api1
|
daaec92eb13822bb880871fbc1b1a45d1c897e92
|
5cce70d9a5a2464d3ebee096ff4ba573ecaa57c0
|
refs/heads/master
| 2023-04-16T20:34:16.737990
| 2021-04-22T07:27:09
| 2021-04-22T07:27:09
| 360,429,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,348
|
py
|
"""
Django settings for djangoAPI project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-))_4%#k&o!jgl#re&wvi0buudw)1&r88w5)6nt7li$zxl*$3_d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'cars.apps.CarsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoAPI.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoAPI.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"khalilov.timur97@mail.ru"
] |
khalilov.timur97@mail.ru
|
caa50e12b9007727a7713ee67884ef7ac755ce8e
|
763f76758496e477eef799eceeace43f289cca03
|
/src/encode_task_merge_fastq.py
|
2ee0add2000aeb63d49c923475df82dd4f7d8441
|
[
"MIT"
] |
permissive
|
kundajelab/cut-n-run-pipeline
|
edccf6fb719473c27251f0d81a767c814a5cc460
|
0f9cf7870288d462f69449cb2b99faa9292af3bc
|
refs/heads/master
| 2020-08-04T16:05:10.748214
| 2019-10-01T23:11:09
| 2019-10-01T23:11:09
| 212,196,329
| 1
| 0
|
MIT
| 2019-10-01T23:23:18
| 2019-10-01T20:46:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,392
|
py
|
#!/usr/bin/env python
# ENCODE DCC fastq merger wrapper
# Author: Jin Lee (leepc12@gmail.com)
import sys
import os
import argparse
from encode_lib_common import (
hard_link, log, ls_l, mkdir_p, read_tsv, run_shell_cmd,
strip_ext_fastq)
def parse_arguments(debug=False):
parser = argparse.ArgumentParser(prog='ENCODE DCC fastq merger.',
description='')
parser.add_argument(
'fastqs', nargs='+', type=str,
help='TSV file path or list of FASTQs. '
'FASTQs must be compressed with gzip (with .gz). '
'Use TSV for multiple fastqs to be merged later. '
'row=merge_id, col=end_id).')
parser.add_argument('--paired-end', action="store_true",
help='Paired-end FASTQs.')
parser.add_argument('--nth', type=int, default=1,
help='Number of threads to parallelize.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO',
choices=['NOTSET', 'DEBUG', 'INFO',
'WARNING', 'CRITICAL', 'ERROR',
'CRITICAL'],
help='Log level')
args = parser.parse_args()
# parse fastqs command line
if args.fastqs[0].endswith('.gz') or args.fastqs[0].endswith('.fastq') or \
args.fastqs[0].endswith('.fq'): # it's fastq
args.fastqs = [[f] for f in args.fastqs] # make it a matrix
else: # it's TSV
args.fastqs = read_tsv(args.fastqs[0])
for i, fastqs in enumerate(args.fastqs):
if args.paired_end and len(fastqs) != 2:
raise argparse.ArgumentTypeError(
'Need 2 fastqs per replicate for paired end.')
if not args.paired_end and len(fastqs) != 1:
raise argparse.ArgumentTypeError(
'Need 1 fastq per replicate for single end.')
log.setLevel(args.log_level)
log.info(sys.argv)
return args
# make merged fastqs on $out_dir/R1, $out_dir/R2
def merge_fastqs(fastqs, end, out_dir):
out_dir = os.path.join(out_dir, end)
mkdir_p(out_dir)
prefix = os.path.join(out_dir,
os.path.basename(strip_ext_fastq(fastqs[0])))
merged = '{}.merged.fastq.gz'.format(prefix)
if len(fastqs) > 1:
cmd = 'zcat -f {} | gzip -nc > {}'.format(
' '.join(fastqs),
merged)
run_shell_cmd(cmd)
return merged
else:
return hard_link(fastqs[0], merged)
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
# update array with trimmed fastqs
fastqs_R1 = []
fastqs_R2 = []
for fastqs in args.fastqs:
fastqs_R1.append(fastqs[0])
if args.paired_end:
fastqs_R2.append(fastqs[1])
log.info('Merging fastqs...')
log.info('R1 to be merged: {}'.format(fastqs_R1))
merge_fastqs(fastqs_R1, 'R1', args.out_dir)
if args.paired_end:
log.info('R2 to be merged: {}'.format(fastqs_R2))
merge_fastqs(fastqs_R2, 'R2', args.out_dir)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
|
[
"leepc12@gmail.com"
] |
leepc12@gmail.com
|
193c71e4e43bc56faf9ea83f8d5ac6de37f491a5
|
768058e7f347231e06a28879922690c0b6870ed4
|
/venv/lib/python3.7/site-packages/numba/cuda/tests/cudadrv/test_emm_plugins.py
|
dd5315bc6c3a56436b4f456da51329cd90df1b97
|
[] |
no_license
|
jciech/HeisenbergSpinChains
|
58b4238281d8c158b11c6c22dd0da82025fd7284
|
e43942bbd09f6675e7e2ff277f8930dc0518d08e
|
refs/heads/master
| 2022-12-18T08:04:08.052966
| 2020-09-29T12:55:00
| 2020-09-29T12:55:00
| 258,476,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,084
|
py
|
import ctypes
import numpy as np
import weakref
from numba import cuda
from numba.core import config
from numba.cuda.testing import unittest, CUDATestCase, skip_on_cudasim
from numba.tests.support import linux_only
if not config.ENABLE_CUDASIM:
class DeviceOnlyEMMPlugin(cuda.HostOnlyCUDAMemoryManager):
"""
Dummy EMM Plugin implementation for testing. It memorises which plugin
API methods have been called so that the tests can check that Numba
called into the plugin as expected.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# For tracking our dummy allocations
self.allocations = {}
self.count = 0
# For tracking which methods have been called
self.initialized = False
self.memalloc_called = False
self.reset_called = False
self.get_memory_info_called = False
self.get_ipc_handle_called = False
def memalloc(self, size):
# We maintain a list of allocations and keep track of them, so that
# we can test that the finalizers of objects returned by memalloc
# get called.
# Numba should have initialized the memory manager when preparing
# the context for use, prior to any memalloc call.
if not self.initialized:
raise RuntimeError("memalloc called before initialize")
self.memalloc_called = True
# Create an allocation and record it
self.count += 1
alloc_count = self.count
self.allocations[alloc_count] = size
# The finalizer deletes the record from our internal dict of
# allocations.
finalizer_allocs = self.allocations
def finalizer():
del finalizer_allocs[alloc_count]
# We use an AutoFreePointer so that the finalizer will be run when
# the reference count drops to zero.
ctx = weakref.proxy(self.context)
ptr = ctypes.c_void_p(alloc_count)
return cuda.cudadrv.driver.AutoFreePointer(
ctx, ptr, size, finalizer=finalizer
)
def initialize(self):
# No special initialization needed.
self.initialized = True
def reset(self):
# We remove all allocations on reset, just as a real EMM Plugin
# would do. Note that our finalizers in memalloc don't check
# whether the allocations are still alive, so running them after
# reset will detect any allocations that are floating around at
# exit time; however, the atexit finalizer for weakref will only
# print a traceback, not terminate the interpreter abnormally.
self.reset_called = True
def get_memory_info(self):
# Return some dummy memory information
self.get_memory_info_called = True
return cuda.MemoryInfo(free=32, total=64)
def get_ipc_handle(self, memory):
# The dummy IPC handle is only a string, so it is important that
# the tests don't try to do too much with it (e.g. open / close
# it).
self.get_ipc_handle_called = True
return "Dummy IPC handle for alloc %s" % memory.device_pointer.value
@property
def interface_version(self):
# The expected version for an EMM Plugin.
return 1
class BadVersionEMMPlugin(DeviceOnlyEMMPlugin):
"""A plugin that claims to implement a different interface version"""
@property
def interface_version(self):
return 2
@skip_on_cudasim("EMM Plugins not supported on CUDA simulator")
class TestDeviceOnlyEMMPlugin(CUDATestCase):
"""
Tests that the API of an EMM Plugin that implements device allocations
only is used correctly by Numba.
"""
def setUp(self):
# Always start afresh with a new context and memory manager
cuda.close()
cuda.set_memory_manager(DeviceOnlyEMMPlugin)
def tearDown(self):
# Set the memory manager back to the Numba internal one for subsequent
# tests.
cuda.close()
cuda.set_memory_manager(cuda.cudadrv.driver.NumbaCUDAMemoryManager)
def test_memalloc(self):
mgr = cuda.current_context().memory_manager
# Allocate an array and check that memalloc was called with the correct
# size.
arr_1 = np.arange(10)
d_arr_1 = cuda.device_array_like(arr_1)
self.assertTrue(mgr.memalloc_called)
self.assertEqual(mgr.count, 1)
self.assertEqual(mgr.allocations[1], arr_1.nbytes)
# Allocate again, with a different size, and check that it is also
# correct.
arr_2 = np.arange(5)
d_arr_2 = cuda.device_array_like(arr_2)
self.assertEqual(mgr.count, 2)
self.assertEqual(mgr.allocations[2], arr_2.nbytes)
# Remove the first array, and check that our finalizer was called for
# the first array only.
del d_arr_1
self.assertNotIn(1, mgr.allocations)
self.assertIn(2, mgr.allocations)
# Remove the second array and check that its finalizer was also
# called.
del d_arr_2
self.assertNotIn(2, mgr.allocations)
def test_initialized_in_context(self):
# If we have a CUDA context, it should already have initialized its
# memory manager.
self.assertTrue(cuda.current_context().memory_manager.initialized)
def test_reset(self):
ctx = cuda.current_context()
ctx.reset()
self.assertTrue(ctx.memory_manager.reset_called)
def test_get_memory_info(self):
ctx = cuda.current_context()
meminfo = ctx.get_memory_info()
self.assertTrue(ctx.memory_manager.get_memory_info_called)
self.assertEqual(meminfo.free, 32)
self.assertEqual(meminfo.total, 64)
@linux_only
def test_get_ipc_handle(self):
# We don't attempt to close the IPC handle in this test because Numba
# will be expecting a real IpcHandle object to have been returned from
# get_ipc_handle, and it would cause problems to do so.
arr = np.arange(2)
d_arr = cuda.device_array_like(arr)
ipch = d_arr.get_ipc_handle()
ctx = cuda.current_context()
self.assertTrue(ctx.memory_manager.get_ipc_handle_called)
self.assertIn("Dummy IPC handle for alloc 1", ipch._ipc_handle)
@skip_on_cudasim("EMM Plugins not supported on CUDA simulator")
class TestBadEMMPluginVersion(CUDATestCase):
"""
Ensure that Numba rejects EMM Plugins with incompatible version
numbers.
"""
def test_bad_plugin_version(self):
with self.assertRaises(RuntimeError) as raises:
cuda.set_memory_manager(BadVersionEMMPlugin)
self.assertIn("version 1 required", str(raises.exception))
if __name__ == "__main__":
unittest.main()
|
[
"jan@multiply.ai"
] |
jan@multiply.ai
|
063ee61f063bc5e4fefb7733ad5efaacaf5d8f48
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_201/60.py
|
5485a73302478c045c90203cd40d735999b80c55
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
from tqdm import tqdm
from xheap import OrderHeap
from collections import defaultdict
def solve():
N, K = map(int, input().split())
max_heap = OrderHeap([], lambda key: -key)
unique = set()
cnt = defaultdict(int)
max_heap.push(N)
cnt[N] = 1
while len(max_heap) > 0:
val = max_heap.pop()
nr = cnt[val]
if K <= nr:
return (val - 1 - (val-1)//2, (val - 1) // 2)
else:
K -= nr
unique.add(val)
l = [(val-1)//2, val - 1 - (val-1)//2]
for el in l:
if el:
if cnt[el] is 0:
max_heap.push(el)
cnt[el] += nr
#print (N, K, max_heap)
if __name__ == "__main__":
#test()
T = int(input())
for t in tqdm(range(1, T + 1)):
solution = solve()
print ("Case #{}: {} {}".format(t, solution[0], solution[1]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
83c3086e6a3213ae90d70e69b8cb7348485e9659
|
4efbbf153fd5e87bf477d147916271be32c71c9b
|
/examples/fingerprint_optimalpresenter.py
|
a0d3b513b57900b716d5a3298f6268a8bd01b838
|
[
"MIT"
] |
permissive
|
R3dFruitRollUp/quail
|
8b003084c6978fcf55abbc4f1581e40de3a259f6
|
a6d6502746c853518a670d542222eb5fc2b05542
|
refs/heads/master
| 2020-03-19T00:13:09.073101
| 2018-02-06T19:35:15
| 2018-02-06T19:35:15
| 135,463,292
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import quail
from quail import Fingerprint, OptimalPresenter
# generate some fake data
next_presented = ['CAT', 'DOG', 'SHOE', 'HORSE']
next_recalled = ['HORSE', 'DOG', 'CAT']
next_features = [{
'category' : 'animal',
'size' : 'bigger',
'starting letter' : 'C',
'length' : 3
},
{
'category' : 'animal',
'size' : 'bigger',
'starting letter' : 'D',
'length' : 3
},
{
'category' : 'object',
'size' : 'smaller',
'starting letter' : 'S',
'length' : 4
},
{
'category' : 'animal',
'size' : 'bigger',
'starting letter' : 'H',
'length' : 5
}
]
dist_funcs = {
'category' : lambda a, b: int(a!=b),
'size' : lambda a, b: int(a!=b),
'starting letter' : lambda a, b: int(a!=b),
'length' : lambda a, b: np.linalg.norm(np.subtract(a,b))
}
egg = quail.Egg(pres=[next_presented], rec=[next_recalled], features=[next_features], dist_funcs=dist_funcs)
# initialize fingerprint
fingerprint = Fingerprint(init=egg)
# initialize presenter
params = {
'fingerprint' : fingerprint
}
presenter = OptimalPresenter(params=params, strategy='stabilize')
# update the fingerprint
fingerprint.update(egg)
# reorder next list
resorted_egg = presenter.order(egg, method='permute', nperms=100)
print(resorted_egg.pres)
|
[
"andrew.heusser@gmail.com"
] |
andrew.heusser@gmail.com
|
6f6fb65a8521f7e11f144a4842783d5022ade08a
|
cfb41bfd6a2b58d08fc9ef56ff0835b4348db689
|
/04_dockerfile_exercises/python/python-server-3.6.8.py
|
c532f0811e07b0f0e736e109c96328c4b9d40328
|
[] |
no_license
|
DWONeill18/docker
|
8bd0b19ac72ed30406bff746a25ab6ddff7ed858
|
1ad41f20a9cf43af7105031f553cb7e4a6f6c4a7
|
refs/heads/master
| 2020-06-23T10:01:26.486894
| 2019-07-24T08:28:49
| 2019-07-24T08:28:49
| 198,591,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# only works with Python 3
import http.server
import socketserver
PORT = 9000
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
|
[
"d.w.oneill.18@gmail.com"
] |
d.w.oneill.18@gmail.com
|
33d91aeff747366652036fd6f7da80f56c2698be
|
f7f9e2fc2c358269128fdd0a5e2483c19ec1b4d6
|
/env/bin/normalizer
|
7bbd18f69d2c832b7727a7a84c732d341b526fc6
|
[] |
no_license
|
anandrajB/chatbot-django-rest
|
d0d0043ec123ef1667a3ba37902828e7fadfc0f7
|
510027eccc7ebdf9ed49675a084380eb318a3c9c
|
refs/heads/master
| 2023-08-02T02:12:36.434427
| 2021-09-27T08:35:22
| 2021-09-27T08:35:22
| 410,802,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/home/anand/Music/bot/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli.normalizer import cli_detect
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli_detect())
|
[
"anand98.ar@gmail.com"
] |
anand98.ar@gmail.com
|
|
31dc1f3924315bd72d97753b74206b70b996dfb7
|
06b5b8c697357816a14a2fecb59d5f5bee9f88e4
|
/giza/giza/operations/build_env.py
|
44b53719b7c589942662f8ac10447e1c6ad44a12
|
[] |
no_license
|
mongodb-china/docs-tools
|
6cc7d13fa7127b93a1adde380e73ad34ef883903
|
8698bba575c028d5a53ae75ff40da15d57c71af9
|
refs/heads/master
| 2020-06-11T15:03:43.837471
| 2016-12-10T07:57:31
| 2016-12-10T07:57:31
| 75,642,928
| 0
| 0
| null | 2016-12-05T16:09:54
| 2016-12-05T16:09:53
| null |
UTF-8
|
Python
| false
| false
| 7,571
|
py
|
# Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import tarfile
import tempfile
import contextlib
import argh
import libgiza.task
from libgiza.app import BuildApp
from sphinx.application import Sphinx, ENV_PICKLE_FILENAME
from sphinx.builders.html import get_stable_hash
from giza.config.helper import fetch_config, get_builder_jobs
from giza.config.sphinx_config import avalible_sphinx_builders
from giza.operations.packaging import fetch_package
from giza.tools.files import safe_create_directory, FileNotFoundError
logger = logging.getLogger('giza.operations.build_env')
# Helpers
@contextlib.contextmanager
def cd(path):
cur_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(cur_dir)
def is_git_dir(path):
git_dir = ''.join([os.path.sep, '.git', os.path.sep])
if git_dir in path:
return True
else:
return False
def extract_package_at_root(path, conf):
with cd(conf.paths.projectroot):
with tarfile.open(path, "r:gz") as t:
t.extractall()
def get_existing_builders(conf):
return [b
for b in avalible_sphinx_builders()
if os.path.isdir(os.path.join(conf.paths.projectroot, conf.paths.branch_output, b))]
def env_package_worker(args, conf):
# used by the make interface
package_build_env(args.builder, args.editions_to_build, args.languages_to_build, conf)
# Core Workers
def package_build_env(builders, editions, languages, conf):
arc_fn = '-'.join(['cache',
conf.project.name,
conf.git.branches.current,
datetime.datetime.utcnow().strftime('%s'),
conf.git.commit[:8]]) + ".tar.gz"
archive_path = os.path.join(conf.paths.buildarchive, arc_fn)
safe_create_directory(conf.paths.buildarchive)
existing_archives = os.listdir(conf.paths.buildarchive)
for arc in existing_archives:
if conf.git.commit[:8] in arc:
m = 'archive "{0}" exists for current git hash, not recreating'
logger.warning(m.format(archive_path))
return
logger.debug("no archive for commit '{0}' continuing".format(conf.git.commit))
with cd(conf.paths.projectroot):
files_to_archive = set()
for ((edition, language, builder), (rconf, sconf)) in get_builder_jobs(conf):
files_to_archive.add(rconf.paths.branch_source)
files_to_archive.add(os.path.join(rconf.paths.branch_output,
sconf.build_output))
files_to_archive.add(os.path.join(rconf.paths.branch_output,
'-'.join(('doctrees', sconf.build_output))))
files_to_archive.add(rconf.system.dependency_cache_fn)
files_to_archive = list(files_to_archive)
logger.info('prepped build cache archive. writing file now.')
for fn in files_to_archive:
if not os.path.exists(fn):
raise FileNotFoundError(fn)
try:
with tarfile.open(archive_path, 'w:gz') as t:
for fn in files_to_archive:
t.add(fn, exclude=is_git_dir)
logger.info("created build-cache archive: " + archive_path)
except Exception as e:
os.remove(archive_path)
logger.critical("failed to create archive: " + archive_path)
logger.error(e)
def fix_build_env(builder, conf):
"""
Given a builder name and the conf object, this function fixes the build
artifacts for the current build to prevent a full rebuild. Currently
re-pickles the environment and dumps the ``.buildinfo`` file in the build
directory with the correct hashes.
"""
fn = os.path.join(conf.paths.projectroot, conf.paths.branch_output, builder, '.buildinfo')
logger.info('updating cache for: ' + builder)
if not os.path.isfile(fn):
return
doctree_dir = os.path.join(conf.paths.projectroot,
conf.paths.branch_output,
"doctrees-" + builder)
sphinx_app = Sphinx(srcdir=os.path.join(conf.paths.projectroot,
conf.paths.branch_output, "source"),
confdir=conf.paths.projectroot,
outdir=os.path.join(conf.paths.projectroot,
conf.paths.branch_output, builder),
doctreedir=doctree_dir,
buildername=builder,
status=tempfile.NamedTemporaryFile(),
warning=tempfile.NamedTemporaryFile())
sphinx_app.env.topickle(os.path.join(doctree_dir, ENV_PICKLE_FILENAME))
with open(fn, 'r') as f:
lns = f.readlines()
tags_hash_ln = None
for ln in lns:
if ln.startswith('tags'):
tags_hash_ln = ln
break
if tags_hash_ln is None:
tags_hash_ln = 'tags: ' + get_stable_hash(sorted(sphinx_app.tags))
with open(fn, 'w') as f:
config_dict = dict((name, sphinx_app.config[name])
for (name, desc) in sphinx_app.config.values.items()
if desc[1] == 'html')
f.write('# Sphinx build info version 1')
f.write('\n\n') # current format requires an extra line here.
f.write('config: ' + get_stable_hash(config_dict))
f.write('\n')
f.write(tags_hash_ln)
f.write('\n')
# Task Creators
def fix_build_env_tasks(builders, conf):
tasks = []
message = "fix up sphinx environment for builder '{0}'"
for builder in builders:
t = libgiza.task.Task(job=fix_build_env,
args=(builder, conf),
target=True,
dependency=None,
description=message.format(builder))
tasks.append(t)
return tasks
# Entry Points
@argh.arg('--edition', '-e', nargs='*', dest='editions_to_build')
@argh.arg('--language', '-l', nargs='*', dest='languages_to_build')
@argh.arg('--builder', '-b', nargs='*', default='html')
@argh.expects_obj
def package(args):
conf = fetch_config(args)
package_build_env(builders=conf.runstate.builder,
editions=conf.runstate.editions_to_build,
languages=conf.runstate.languages_to_build,
conf=conf)
@argh.arg('--path', '-p', default=None, dest='_path')
@argh.expects_obj
def extract(args):
conf = fetch_config(args)
with BuildApp.new(pool_type=conf.runstate.runner,
pool_size=conf.runstate.pool_size,
force=conf.runstate.force).context() as app:
path = fetch_package(conf.runstate._path, conf)
extract_package_at_root(path, conf)
builders = get_existing_builders(conf)
app.extend_queue(fix_build_env_tasks(builders, conf))
|
[
"samk@10gen.com"
] |
samk@10gen.com
|
67ce35bd50b83f173aadc268fddc77101db67226
|
c9bc27f70a4bca5ce6acf346bfc25b5407502d00
|
/ATIVIDADE G - FÁBIO 2a - CONDICIONAIS/fabio2a_q08.py
|
8ca044177565336546e83029c1d7bc253d3dad98
|
[] |
no_license
|
lucascoelho33/ifpi-ads-algoritmos2020
|
2197bbc84ce9c027b3f1da006728448a846d7ffb
|
1ce20a489adbfe7321817acd98d35f2efc0360ca
|
refs/heads/master
| 2021-03-01T23:03:10.293013
| 2020-10-17T14:35:19
| 2020-10-17T14:35:19
| 245,650,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
#8. Leia data atual (dia, mês e ano) e data de nascimento (dia, mês e ano) de uma pessoa, calcule e escreva
#sua idade exata (em anos).
def main():
dia_atual = int(input())
mes_atual = int(input())
ano_atual = int(input())
print('')
dia_nasc = int(input())
mes_nasc = int(input())
ano_nasc = int(input())
dias_hoje = (ano_atual * 365) + (mes_atual * 30) + dia_atual
dias_nascimento = (ano_nasc * 365) + (mes_nasc * 30) + dia_nasc
dias_vida = dias_hoje - dias_nascimento
anos_vida = dias_vida // 365
print('Sua idade exata em anos é %d'% anos_vida)
main()
|
[
"llucascoelho33@gmail.com"
] |
llucascoelho33@gmail.com
|
b6dee1089e1fbd779878eb612f2a92839576e351
|
f65e740c52f0199307c3fc1e210a27a604bb3142
|
/Neural-Nets/Brilliant Course/Script 1 [Perceptron (2-in, 1-out)].py
|
4fc5c403bbfad578947d6eef859005f731d3ee65
|
[] |
no_license
|
ninjafrostpn/PythonProjects
|
69ce67af4714edbf53477ff9233262c6df439a7d
|
26e2edca16a1dc198858f7e6f60684e343775329
|
refs/heads/master
| 2022-10-22T02:53:04.380713
| 2022-10-21T09:05:43
| 2022-10-21T09:05:43
| 106,318,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
import numpy as np
import pygame
from pygame.locals import *
pygame.init()
w, h = 500, 500
screen = pygame.display.set_mode((w, h))
screensize = np.int32((w, h))
# Weights applied to x and y positions when determining activation
xweight = 0.2
yweight = 0.3
# Bias toward activation
bias = -100
# Vector of weights
weights = np.float32([bias, xweight, yweight])
# Vector of input values with 1 standing in as the "weight" of the perceptron's bias
# x and y (the 2nd and 3rd values) set to 1 to begin with
values = np.ones(3, "float32")
keys = set()
while True:
# Randomly selected point to test boundary condition
pos = np.random.sample(2) * screensize
# Point put into input vector
values[1:] = pos[:]
# Vector dot multiplication used to determine whether perceptron activates
# Coloured point is displayed accordingly
if np.dot(values, weights) >= 0:
pygame.draw.rect(screen, [0, 255, 0], (*(pos - 1), 2, 2))
else:
pygame.draw.rect(screen, [0, 0, 255], (*(pos - 1), 2, 2))
pygame.display.flip()
for e in pygame.event.get():
if e.type == QUIT:
quit()
elif e.type == KEYDOWN:
keys.add(e.key)
if e.key == K_ESCAPE:
quit()
elif e.type == KEYUP:
keys.discard(e.key)
|
[
"cst1g16@soton.ac.uk"
] |
cst1g16@soton.ac.uk
|
78a10426fb526d6fb4239f22f505783ec95317fa
|
5baf34c56074a9d27030b55e156398a478ff885b
|
/tol_stack/distributions.py
|
18cbc106e5f26df0d08fb455b24055faad879cad
|
[] |
no_license
|
slightlynybbled/tol-stack
|
2f4e7e5097bc839b679bf72c8bc1f10b0ba42459
|
ccc4609a440b71b9ffec9d365cfb695d44ea123e
|
refs/heads/master
| 2022-10-04T13:29:26.908152
| 2022-09-08T08:16:46
| 2022-09-08T08:16:46
| 220,051,109
| 9
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,888
|
py
|
from typing import Tuple
import numpy as np
from scipy.stats import skewnorm
_max_iterations = 100
def norm(loc: float, scale: float, size: int) -> np.ndarray:
"""
Returns a random sampling from the normal distribution.
:param loc: the nominal value
:param scale: the range of common lengths
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
return np.random.normal(loc=loc, scale=scale, size=size)
def norm_screened(loc: float, scale: float,
limits: Tuple[float, float], size: int) \
-> np.ndarray:
"""
Returns a random sampling from the normal distribution
which has been screened. This is a common distribution when
a go/no-go fixture is in use.
:param loc: the nominal value
:param scale: the range of common lengths
:param limits: a tuple of floats containing the low \
and high screening limits
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
values = np.random.normal(loc=loc, scale=scale, size=size)
if limits is not None:
if len(limits) != 2:
raise ValueError('"limits" must be a tuple of exactly two '
'floating-point lengths')
low_limit, high_limit = limits
# removes lengths not in range
values = values[(values >= low_limit) & (values <= high_limit)]
count = 0
while len(values) < size:
values = np.append(values, np.random.normal(loc=loc,
scale=scale,
size=size))
values = values[(values >= low_limit) & (values <= high_limit)]
count += 1
if count > _max_iterations:
raise ValueError('number of iterations exceeds the max '
'allowable... are the limits set '
'appropriately?')
values = values[:size]
return values
def norm_notched(loc: float, scale: float,
limits: Tuple[float, float], size: int) -> np.ndarray:
"""
Returns a random sampling from the normal distribution
which has been screened in order to remove the nominal lengths. This is a
common distribution when parts are being sorted and the leftover parts
are used.
:param loc: the nominal value
:param scale: the range of common lengths
:param limits: a tuple of floats containing the low \
and high screening limits
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
values = np.random.normal(loc=loc, scale=scale, size=size)
if limits is not None:
if len(limits) != 2:
raise ValueError('"limits" must be a tuple of exactly two '
'floating-point lengths')
low_limit, high_limit = limits
# removes lengths not in range
values = values[(values <= low_limit) | (values >= high_limit)]
count = 0
while len(values) < size:
values = np.append(values, np.random.normal(loc=loc, scale=scale, size=size))
values = values[(values <= low_limit) | (values >= high_limit)]
count += 1
if count > _max_iterations:
raise ValueError('number of iterations exceeds the max '
'allowable... are the limits set '
'appropriately?')
values = values[:size]
return values
def norm_lt(loc: float, scale: float, limit: float, size: int) -> np.ndarray:
"""
Returns a random sampling from the normal distribution
which has been screened in order to remove lengths above the limit.
:param loc: the nominal value
:param scale: the range of common lengths
:param limit: a floats containing the upper screening limit
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
values = np.random.normal(loc=loc, scale=scale,
size=size)
# removes lengths not in range
values = values[values <= limit]
count = 0
while len(values) < size:
values = np.append(values, np.random.normal(loc=loc,
scale=scale,
size=size))
values = values[(values <= limit)]
count += 1
if count > _max_iterations:
raise ValueError('number of iterations exceeds the max '
'allowable... is the limit set appropriately?')
values = values[:size]
return values
def norm_gt(loc: float, scale: float, limit: float, size: int) -> np.ndarray:
"""
Returns a random sampling from the normal distribution
which has been screened in order to remove lengths below the limit.
:param loc: the nominal value
:param scale: the range of common lengths
:param limit: a floats containing the lower screening limit
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
values = np.random.normal(loc=loc, scale=scale, size=size)
# removes lengths not in range
values = values[values >= limit]
count = 0
while len(values) < size:
values = np.append(values, np.random.normal(loc=loc,
scale=scale,
size=size))
values = values[(values >= limit)]
count += 1
if count > _max_iterations:
raise ValueError('number of iterations exceeds '
'the max allowable... '
'is the limit set appropriately?')
values = values[:size]
return values
def skew_normal(skewiness: float, loc: float,
scale: float, size: int) -> np.ndarray:
"""
Returns a random sampling from skewnormal distribution.
:param skewiness: "0" skewiness, represents no skew; a negative skewiness \
will create a left skew while a positive skewiness will create a \
right skew; as skewiness increases, so does the skew of the distribution
:param loc: the nominal value
:param scale: the range of common lengths
:param size: the number of samples within the common lengths
:return: a numpy array of lengths
"""
values = skewnorm.rvs(skewiness, loc=loc, scale=scale, size=size)\
.astype(np.float64)
return values
if __name__ == '__main__':
    # Demo: histogram of a strongly left-skewed sample (skewiness=-5).
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.hist(skew_normal(skewiness=-5, loc=1, scale=0.01, size=10000), bins=51)
    plt.show()
|
[
"slightlynybbled@gmail.com"
] |
slightlynybbled@gmail.com
|
4e4f19c6732fa151ce45dd74ef5fbd186c6e45c9
|
92f3320303fc3e7e34ec3f310c81ae5ab6350956
|
/test_nw0.py
|
86c89aec72525fdac57b7bbffb07d16f0e20d1c4
|
[] |
no_license
|
wwj718/gameshell_node
|
1495a3adc7d540783e127bb5a433aa6980ee9594
|
856138b57de5a2a73a26b1f9510d158e364ce9c4
|
refs/heads/master
| 2022-12-05T22:32:55.422560
| 2020-08-13T04:55:16
| 2020-08-13T04:55:16
| 286,996,721
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 767
|
py
|
import sys
import re
import socket
import uuid
import networkzero as nw0
def advertise():
    """Advertise this process on the network under a unique name and
    return the advertised address."""
    unique_name = '{}-{}'.format(socket.gethostname(), str(uuid.uuid4())[:8])
    address = nw0.advertise(unique_name)  # hostname..uuid
    return address
# wait ip
def wait_for_ip(address):
    """Block until a peer sends a message containing a dotted-quad IP
    address, then reply "connected!" and return that message.

    Invalid messages get a "please input ip address" reply and the wait
    restarts.

    Bug fix: the original recursed on invalid input but discarded the
    recursive call's return value, so a first invalid message made the
    function ultimately return None. Rewritten as a loop that always
    returns the accepted content. The regex is also a raw string now so
    ``\d`` is not a deprecated escape.
    """
    # Matches a dotted-quad anywhere in the message (no octet range check).
    pat = re.compile(r".*\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
    while True:
        print("waiting for ip")
        content = nw0.wait_for_message_from(address)
        if pat.match(content):
            print("connected!")
            nw0.send_reply_to(address, "connected!")
            return content
        nw0.send_reply_to(address, "please input ip address")
        print("please input ip address")
# Advertise this node, then block until a peer sends a valid IP address.
address = advertise()
ip = wait_for_ip(address) # until input ip
print(ip)
|
[
"wuwenjie718@gmail.com"
] |
wuwenjie718@gmail.com
|
8154211d6663b6e3e40647977ddb4703fd19859b
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/320.py
|
b95b77b6804f8b3267bbdcb14419eada0a98a4ce
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
Reports the number of occurences of each word in text.
"""
def word_count(text):
    """Return a dict mapping each word in *text* to its occurrence count.

    The punctuation characters .,:;!@#$%^&*() are stripped before
    splitting on whitespace, and words are counted case-insensitively.

    Bug fix: ``text.translate(None, chars)`` is Python-2-only and raises
    TypeError on Python 3; a ``str.maketrans`` deletion table is the
    Python 3 equivalent.
    """
    words = {}
    text = text.translate(str.maketrans('', '', ".,:;!@#$%^&*()"))
    for word in text.split():
        lcword = word.lower()
        words[lcword] = words.get(lcword, 0) + 1
    return words
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
5ca946db0d9177b1e5089d1599e1068fb2505779
|
7f1ba62c7d7b71da843d6da9cea5227c1c754618
|
/Django3/grade/urls.py
|
4a6b0a8d885d793e92bc19a5790da3945b77b4a7
|
[] |
no_license
|
caoyucharlie/DjangoLearning
|
706611bffe7832fc397a81adca462e56072b333e
|
8f6a6fcad7298679f58880d7cc2788042dd94868
|
refs/heads/master
| 2020-03-12T12:34:40.603290
| 2018-05-04T10:44:46
| 2018-05-04T10:44:46
| 130,621,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
from django.conf.urls import url
from django.contrib import admin
from grade import views
# URL routes for the grade app.
urlpatterns = [
    url(r'^grades/', views.showGrades),
    # NOTE(review): pattern has no leading '^', so it matches 'show/'
    # anywhere in the path — confirm whether r'^show/' was intended.
    url(r'show/', views.showpage)
]
|
[
"charlielbj@163.com"
] |
charlielbj@163.com
|
884ebe8c40a3b034f705a30e20693e81d60fadfa
|
d81ccdcec0ee793d9920dfa9749d93c23d3129a7
|
/department/models.py
|
8b961ee15201f8b4a35d9752dcacf14a25bc5555
|
[] |
no_license
|
ehapsamy0/Hospital_Website
|
5d11ffebcbefa397bc66d0ecbc4d36cf0158a1bc
|
b751123441a26d7999d8a38971d4b713e282ad77
|
refs/heads/master
| 2022-11-27T10:56:04.012603
| 2020-04-08T20:34:55
| 2020-04-08T20:34:55
| 254,175,108
| 0
| 0
| null | 2022-11-22T05:50:31
| 2020-04-08T18:58:09
|
Tcl
|
UTF-8
|
Python
| false
| false
| 285
|
py
|
from django.db import models
# Create your models here.
class Department(models.Model):
    """Hospital department shown on the site: name, image and description."""
    # Field order is kept as-is: it is reflected in migrations and forms.
    name = models.CharField(max_length=250)
    image = models.ImageField(upload_to='department_img/')
    description = models.TextField()
    def __str__(self):
        # Human-readable representation (used e.g. by the Django admin).
        return self.name
|
[
"ehapsamy434@gmail.com"
] |
ehapsamy434@gmail.com
|
643275689ff6e9eb7e351d593143c59b47d3572e
|
e5e2b7da41fda915cb849f031a0223e2ac354066
|
/sdk/python/pulumi_azure_native/desktopvirtualization/__init__.py
|
a79ea66d48e045c4e955185963adc59534936fd2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
johnbirdau/pulumi-azure-native
|
b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25
|
d676cc331caa0694d8be99cb90b93fa231e3c705
|
refs/heads/master
| 2023-05-06T06:48:05.040357
| 2021-06-01T20:42:38
| 2021-06-01T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,149
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .application import *
from .application_group import *
from .get_application import *
from .get_application_group import *
from .get_host_pool import *
from .get_msix_package import *
from .get_private_endpoint_connection_by_host_pool import *
from .get_private_endpoint_connection_by_workspace import *
from .get_scaling_plan import *
from .get_workspace import *
from .host_pool import *
from .msix_package import *
from .private_endpoint_connection_by_host_pool import *
from .private_endpoint_connection_by_workspace import *
from .scaling_plan import *
from .workspace import *
from ._inputs import *
from . import outputs
# Make subpackages available:
if typing.TYPE_CHECKING:
import pulumi_azure_native.desktopvirtualization.v20190123preview as v20190123preview
import pulumi_azure_native.desktopvirtualization.v20190924preview as v20190924preview
import pulumi_azure_native.desktopvirtualization.v20191210preview as v20191210preview
import pulumi_azure_native.desktopvirtualization.v20200921preview as v20200921preview
import pulumi_azure_native.desktopvirtualization.v20201019preview as v20201019preview
import pulumi_azure_native.desktopvirtualization.v20201102preview as v20201102preview
import pulumi_azure_native.desktopvirtualization.v20201110preview as v20201110preview
import pulumi_azure_native.desktopvirtualization.v20210114preview as v20210114preview
import pulumi_azure_native.desktopvirtualization.v20210201preview as v20210201preview
import pulumi_azure_native.desktopvirtualization.v20210309preview as v20210309preview
import pulumi_azure_native.desktopvirtualization.v20210401preview as v20210401preview
else:
v20190123preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20190123preview')
v20190924preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20190924preview')
v20191210preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20191210preview')
v20200921preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20200921preview')
v20201019preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20201019preview')
v20201102preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20201102preview')
v20201110preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20201110preview')
v20210114preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20210114preview')
v20210201preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20210201preview')
v20210309preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20210309preview')
v20210401preview = _utilities.lazy_import('pulumi_azure_native.desktopvirtualization.v20210401preview')
|
[
"noreply@github.com"
] |
johnbirdau.noreply@github.com
|
561415b30f0bc633cc140e12e17358f36946af88
|
f058cd1ec57b2e24430605883387b1c34391a2e3
|
/multicam/01_video.py
|
483174659e2dc639f02eb3909bf7c9188ce5dd59
|
[] |
no_license
|
Danny-Dasilva/Blender_Mediapipe
|
9a2966f38e3e6a9aea503eed1bdcc0e4e2ebc502
|
80cbd45e721bc12759d26c317f3a57b6176e1af5
|
refs/heads/main
| 2023-04-21T09:49:47.200918
| 2021-05-15T01:03:40
| 2021-05-15T01:03:40
| 365,960,178
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,353
|
py
|
###############################################################################
### Simple demo with video input
### Input : Live video of face / hand / body
### Output: 2D/2.5D/3D display of face, hand, body keypoint/joint
### Usage : python 01_video.py -m face
### python 01_video.py -m hand
### python 01_video.py -m body
### python 01_video.py -m holistic
###############################################################################
import cv2
import sys
import time
import argparse
from utils_display import DisplayFace, DisplayHand, DisplayBody, DisplayHolistic
from utils_mediapipe import MediaPipeFace, MediaPipeHand, MediaPipeBody, MediaPipeHolistic
# User select mode
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', default='hand',
    help='Select mode: face / hand / body / holistic')
args = parser.parse_args()
mode = args.mode
# Load mediapipe and display class:
# each mode pairs a mediapipe wrapper (keypoint extraction) with a
# matching display helper (2D/2.5D/3D drawing).
if mode=='face':
    pipe = MediaPipeFace(static_image_mode=False, max_num_faces=1)
    disp = DisplayFace(draw3d=True)
elif mode=='hand':
    pipe = MediaPipeHand(static_image_mode=False, max_num_hands=2)
    disp = DisplayHand(draw3d=True, max_num_hands=2)
elif mode=='body':
    pipe = MediaPipeBody(static_image_mode=False, model_complexity=1)
    disp = DisplayBody(draw3d=True)
elif mode=='holistic':
    pipe = MediaPipeHolistic(static_image_mode=False, model_complexity=1)
    disp = DisplayHolistic(draw3d=True)
else:
    # Unknown mode: report and exit.
    print('Undefined mode only the following modes are available: \nface / hand / body / holistic')
    sys.exit()
# Start video capture
cap = cv2.VideoCapture(0) # By default webcam is index 0
# cap = cv2.VideoCapture('../data/video.mp4') # Read from .mp4 file
# cap.set(cv2.CAP_PROP_POS_FRAMES, 1) # Set starting position of frame
# # Log video
# fps = 30
# ret, img = cap.read()
# width, height = int(cap.get(3)), int(cap.get(4))
# fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
# video = cv2.VideoWriter('../data/video_.mp4', fourcc, fps, (width, height))
prev_time = time.time()
# Main loop: grab frame -> run mediapipe -> draw 2D / 2.5D / 3D views.
# Press ESC (27) to quit.
while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
    # Preprocess image if necessary
    img = cv2.flip(img, 1) # Flip image for 3rd person view
    # img = cv2.resize(img, None, fx=0.5, fy=0.5)
    # To improve performance, optionally mark image as not writeable to pass by reference
    img.flags.writeable = False
    # Feedforward to extract keypoint
    param = pipe.forward(img)
    # Compute FPS and stash it into the result structure, whose shape
    # differs per mode (dict, list of dicts, ...).
    curr_time = time.time()
    fps = 1/(curr_time-prev_time)
    if mode=='body':
        param['fps'] = fps
    elif mode=='face' or mode=='hand':
        param[0]['fps'] = fps
    elif mode=='holistic':
        for p in param:
            p['fps'] = fps
    prev_time = curr_time
    img.flags.writeable = True
    # Display 2D keypoint
    cv2.imshow('img 2D', disp.draw2d(img.copy(), param))
    # Display 2.5D keypoint
    cv2.imshow('img 2.5D', disp.draw2d_(img.copy(), param))
    # Display 3D
    disp.draw3d(param)
    disp.vis.update_geometry(None)
    disp.vis.poll_events()
    disp.vis.update_renderer()
    # # Write to video
    # img = disp.draw2d(img.copy(), param)
    # cv2.imshow('img 2D', img)
    # video.write(img)
    key = cv2.waitKey(1)
    if key==27:
        break
# Release mediapipe resources and the camera before exiting.
pipe.pipe.close()
# video.release()
cap.release()
|
[
"yahchayildasilva@gmail.com"
] |
yahchayildasilva@gmail.com
|
d3011d43531ed07a727ecfe4dbd041c753339c15
|
a01e7f87a0088965e2e0a02476d2df12a49a1a18
|
/tools/strop.py
|
3d32a2f8974030077ec41309253a5ff7a4ac4743
|
[] |
no_license
|
gsrr/IFT_jerry
|
0456a8a1fb98f84ad5c26dc36bdf32e2d85c750c
|
4c2f6900dfd7ae7f6b3cc2150b1c1be236b4c95c
|
refs/heads/master
| 2020-04-04T05:30:10.544252
| 2019-08-22T09:12:03
| 2019-08-22T09:12:03
| 48,145,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 743
|
py
|
import sys
import getopt
def usage():
    """Return the help-menu lines for this tool."""
    menu = [
        "help menu",
    ]
    return menu
def str2hex(paras):
    """Print *paras*['msg'] and its uppercase hex encoding.

    :param paras: dict with key 'msg' holding the string to convert

    Bug fix: ``"...".encode("hex")`` and ``print x, y`` are Python 2 only;
    use ``bytes.hex()`` and the print function instead.
    """
    msg = paras['msg']
    print("input:", msg)
    print("input(hex):", msg.encode().hex().upper())
def main():
    """Parse command-line options and dispatch to the selected command.

    Options:
        -h / --help   print the help menu
        -c / --cmd=   name of the command function in this module
        -m / --msg=   message string handed to the command via ``paras``

    Exits with status 2 on a getopt parsing error.

    Bug fix: ``print x`` statements are Python-2-only syntax; converted
    to the print function. Behavior is otherwise unchanged.
    """
    try:
        paras = {}
        opts, args = getopt.getopt(sys.argv[1:], "hc:m:",
                                   ["help", "cmd=", "msg="])
        for o, a in opts:
            if o in ("-h", "--help"):
                print(usage())
            elif o == "-c":
                paras['cmd'] = a
            elif o == "-m":
                paras['msg'] = a
        # Look up the requested command function in this module and run it.
        func = getattr(sys.modules[__name__], paras['cmd'])
        func(paras)
    except getopt.GetoptError as err:
        print(str(err))
        sys.exit(2)
if __name__ == "__main__":
main()
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
bfd1c1851d4ae1a5cb2687352d396977033fc25b
|
0368436dc981ab44975d4b28935ae89a37065030
|
/qa/rpc-tests/mempool_resurrect_test.py
|
7c56bf88423c1f8db1badc4b83f49d6cd6b5213d
|
[
"MIT"
] |
permissive
|
mirzaei-ce/core-koobit
|
b1d350c28f87764a14ed7e92e9918c7af90a93a0
|
7d24e9c554fec6f3631691f456e9873bc4536fbd
|
refs/heads/master
| 2021-08-14T19:04:05.775343
| 2017-11-16T14:35:05
| 2017-11-16T14:35:05
| 110,982,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework.test_framework import KoobitTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(KoobitTestFramework):
    """Checks that mined transactions return to the mempool after a re-org
    (invalidateblock), and get confirmed again when a new block is mined."""
    def setup_network(self):
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False
    def create_tx(self, from_txid, to_address, amount):
        # Build, sign and return (as hex) a one-input, one-output, no-fee
        # transaction spending output 0 of from_txid.
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(signresult["complete"], True)
        return signresult["hex"]
    def run_test(self):
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
        spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
        spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
        # Use invalidateblock to re-org back; all transactions should
        # end up unconfirmed and back in the mempool
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # mempool should now contain all the resurrected, unconfirmed txns
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] == 0)
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
# Test entry point.
if __name__ == '__main__':
    MempoolCoinbaseTest().main()
|
[
"mirzaei@ce.sharif.edu"
] |
mirzaei@ce.sharif.edu
|
933881cc1d244a020fbb7d0a918ea88268fdc271
|
74198519b04bc5ac8d6fe8f4f24ad2ba2e0e9164
|
/untitled/interface_test/woniusales_requests_test/testcase/customer_manager_test.py
|
6ff0337601e8ddb17e11a1ca2210e4510a6b0a9b
|
[] |
no_license
|
ojbk6943/all-from-woniu
|
e2c24cc1297ee51d65e52a64e3c6fbba7eb45f3a
|
e8ab7eb968152b69aff12bdacd41fe8e7c9af19e
|
refs/heads/master
| 2020-12-13T01:54:25.599786
| 2020-01-16T09:23:09
| 2020-01-16T09:23:09
| 234,281,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
from parameterized import parameterized
from selenium.webdriver.common.by import By
from gui_test.webdriver_base.woniusales_test import WoniuSalesTest
from gui_test.woniusales_test.util.services import Services
from gui_test.woniusales_test.util.utility import Utility
from time import sleep
from interface_test.woniusales_requests_test.common.customer_manager import CustomerManager
import unittest
from random import randint
import json
import pymysql
import requests
# Prepare test data (loaded once at import time for parameterized.expand).
# with open("..\\test_data\\add_testdata",encoding="utf8") as file:
#     add_test_data = json.load(file)
add_test_data=Utility.read_json("..\\test_data\\add_testdata")
class Customer_Manager_case(unittest.TestCase):
    """Interface tests for WoniuSales customer management ("add customer")."""
    def setUp(self):
        # Fresh HTTP session per test; log in first so later requests carry
        # the authenticated session cookie.
        self.session = requests.session()
        self.base_config_data=Utility.read_json("..\\config\\base_config")
        self.cookie_data =Utility.read_json("..\\config\\cookie_config")
        # Base URL assembled as protocol + host + port + program path.
        self.base_url=self.base_config_data["protocol"]+\
        self.base_config_data["host"]+self.base_config_data["port"]+self.base_config_data["program"]
        login_url=self.base_url+self.cookie_data[0][0]
        login_data=self.cookie_data[0][1]
        resp_login = self.session.post(login_url, login_data)
    @parameterized.expand(add_test_data)
    def test_add_customer(self,add_last_url,test_data,expect):
        # Data-driven case: each row supplies the URL suffix, the payload and
        # the expected outcome ("add-successful" / "add-fail").
        add_url=self.base_url+add_last_url
        resp_add=CustomerManager().add(self.session,add_url,test_data)
        print(resp_add.text)
        if str(resp_add.text)=="add-successful":
            actual="add-successful"
        else:
            actual = "add-fail"
        self.assertEqual(expect,actual)
        # print(resp_add.text)
# print(resp_add.text)
# Test entry point.
if __name__ == '__main__':
    unittest.main()
|
[
"489367331@qq.com"
] |
489367331@qq.com
|
0026e40ec9bd5ad8a74bde35058625d629c82051
|
41b59a9c8381fa3a92f5d2c37c91261afb9c82c4
|
/Utility/Triggereffi/2017/test/crabConfig_Data_106x_analysis.py
|
c8f4a48ac4c04c5a00549ba54fd8e6adb854c568
|
[] |
no_license
|
Sumankkundu/ChargedParticle
|
c6d4f90b55df49321df2ecd758bb1f39db896f8c
|
eb5bada24b37a58ded186d6e5d2d7bd00898fefe
|
refs/heads/master
| 2023-07-15T03:34:33.377203
| 2021-08-31T05:01:32
| 2021-08-31T05:01:32
| 231,091,587
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,971
|
py
|
from CRABClient.UserUtilities import config
# CRAB3 job configuration: trigger-efficiency tree production on 2017 UL
# JetHT data. Era F is active; other eras/datasets are kept commented out
# and swapped in by editing requestName + inputDataset together.
config = config()
#config.General.requestName = 'Trigger_2017UL_B'
#config.General.requestName = 'Trigger_2017UL_C'
#config.General.requestName = 'Trigger_2017UL_D'
#config.General.requestName = 'Trigger_2017UL_E'
config.General.requestName = 'Trigger_2017UL_F'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.allowUndistributedCMSSW = True
config.JobType.psetName = 'Run_QCD_test_106x_data_cfg.py'
config.JobType.inputFiles= [
#"/afs/cern.ch/work/t/tsarkar/private/QCD-13/CMSSW_7_6_3/src/Test/QCDEventShape/test/Fall15_25nsV2_MC_PtResolution_AK4PFchs.txt", "/afs/cern.ch/work/t/tsarkar/private/QCD-13/CMSSW_7_6_3/src/Test/QCDEventShape/test/Fall15_25nsV2_MC_SF_AK4PFchs.txt", "/afs/cern.ch/work/t/tsarkar/private/QCD-13/CMSSW_7_6_3/src/Test/QCDEventShape/test/Fall15_25nsV2_DATA_UncertaintySources_AK4PF.txt"
]
#config.Data.inputDataset = '/JetHT/Run2017B-09Aug2019_UL2017-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017C-09Aug2019_UL2017-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017D-09Aug2019_UL2017-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017E-09Aug2019_UL2017-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017F-09Aug2019_UL2017-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017B-UL2017_MiniAODv2-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017C-UL2017_MiniAODv2-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017D-UL2017_MiniAODv2-v1/MINIAOD'
#config.Data.inputDataset = '/JetHT/Run2017E-UL2017_MiniAODv2-v1/MINIAOD'
config.Data.inputDataset = '/JetHT/Run2017F-UL2017_MiniAODv2-v1/MINIAOD'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'Automatic'
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 15
#config.Data.unitsPerJob = 200
# Golden-JSON lumi mask restricting jobs to certified luminosity sections.
config.Data.lumiMask = '/afs/cern.ch/work/s/sukundu/private/ESV_charge_CMSSW/Uncertainty2017/cert/HLT_13TeV_UL2017_Collisions17_GoldenJSON.txt'
#config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/13TeV/Legacy_2017/Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt'
#config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/13TeV/ReReco/Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt'
#config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions17/13TeV/PromptReco/Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt'
#config.Data.lumiMask = '/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions16/13TeV/Legacy_2016/Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt'
#config.Data.runRange = '246908-260627' # '193093-194075'
#config.Data.outLFNDirBase = '/store/user/%s/' % (getUsernameFromSiteDB())
config.Data.publication = False
#config.Data.publishDataName = 'May2015_Data_analysis'
config.Site.storageSite = 'T2_IN_TIFR'
|
[
"skundu91phys@gmail.com"
] |
skundu91phys@gmail.com
|
1031b15ef4224999d0fc93543846634035595c25
|
7f2511240539b1327a5a97060fa59f811bdc2889
|
/django_functest/exceptions.py
|
a95b6b4d9fd5ad70530a83cac35c6ae76290059b
|
[] |
no_license
|
liushilive/django-functest
|
e1080c2e8b9031ba9b271bfd56029f0b77082e5a
|
8cffd4ae01dd9a004acc0f37088a34ce5b5e0983
|
refs/heads/master
| 2021-01-20T14:16:23.133597
| 2018-03-30T07:06:22
| 2018-03-30T07:06:22
| 82,748,549
| 0
| 0
| null | 2018-03-30T07:06:23
| 2017-02-22T01:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
from __future__ import absolute_import, print_function, unicode_literals
class WebTestNoSuchElementException(Exception):
    """Raised when a WebTest element lookup finds no matching element."""
    pass
class WebTestMultipleElementsException(Exception):
    """Raised when a WebTest element lookup matches more than one element."""
    pass
class WebTestCantUseElement(Exception):
    """Raised when an element cannot be operated on via WebTest."""
    pass
class SeleniumCantUseElement(Exception):
    """Raised when an element cannot be operated on via Selenium."""
    pass
|
[
"L.Plant.98@cantab.net"
] |
L.Plant.98@cantab.net
|
cbde25477696d51659f3ddfcf455ceb4387eb642
|
6515dee87efbc5edfbf4c117e262449999fcbb50
|
/Sorting/AUC.py
|
c6548f4f09b7421ff5d5d311ff99bde1e848da1e
|
[] |
no_license
|
wangyunge/algorithmpractice
|
24edca77e180854b509954dd0c5d4074e0e9ef31
|
085b8dfa8e12f7c39107bab60110cd3b182f0c13
|
refs/heads/master
| 2021-12-29T12:55:38.096584
| 2021-12-12T02:53:43
| 2021-12-12T02:53:43
| 62,696,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
class AucCompute():
    """Compute AUC (area under the ROC curve) for binary classifications.

    NOTE(review): the original draft did not parse (``if label = 1``,
    a dangling ``last_score =`` and ``fn = pos_total /``) and its
    ``roc_area`` sketch referenced undefined counters; it is replaced by
    a working pair-count implementation of the same idea.
    """
    # Example data kept from the original: (score, label) pairs, label in {0, 1}.
    samples = [(0.5, 1), (0.6, 1), (0.2, 0), (0.55, 0)]

    def __init__(self):
        # Per-instance sample storage (kept for interface compatibility).
        self.samples = []

    def compute(self, label, score):
        """Return the AUC for parallel sequences of labels and scores.

        AUC == P(score of a random positive > score of a random negative),
        with ties counted as half a win.

        :param label: sequence of 0/1 ground-truth labels
        :param score: sequence of classifier scores, parallel to ``label``
        :return: AUC in [0, 1]; 0.0 when there is no positive or no negative
        """
        pos = [s for s, y in zip(score, label) if y]
        neg = [s for s, y in zip(score, label) if not y]
        if not pos or not neg:
            # Undefined without at least one pos/neg pair; report 0.0.
            return 0.0
        wins = 0.0
        for p in pos:
            for q in neg:
                if p > q:
                    wins += 1.0
                elif p == q:
                    wins += 0.5
        return wins / (len(pos) * len(neg))
|
[
"wangyunge1@yahoo.com"
] |
wangyunge1@yahoo.com
|
fb64b70aa19618482a0dc633386ee2f4f1e330f4
|
ec87bf8c5a4617ade5556b6dc4df12a6f1056566
|
/Sec_7/7.2/test2.py
|
27931f07b2eca51e07f6633e454cb2f415ed34ca
|
[] |
no_license
|
WiconWang/spider_project
|
a5772b1bda63695d9e398edd31a3574e568ef0b3
|
f49a93c1cab5716d4dafecb7479a3be2a4af91ad
|
refs/heads/master
| 2023-01-24T08:34:20.951665
| 2020-11-25T06:25:04
| 2020-11-25T06:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
import requests
# Ask a Splash instance to render https://www.jd.com (wait 5s, 1000x700)
# and save the returned PNG screenshot locally.
url = 'http://192.168.6.160:8050/render.png?url=https://www.jd.com&wait=5&width=1000&height=700'
response = requests.get(url)
with open('jd.png', 'wb') as f:
    f.write(response.content)
|
[
"271138425@qq.com"
] |
271138425@qq.com
|
bdab442eec0258db57481eaade41c78e4c9425f5
|
6478723d180a8ef39941ba04b80c1eca9f437323
|
/1063. Number of Valid Subarrays.py
|
d2ff3ac378cb81f52d28f7851e45d2f12bbe5249
|
[] |
no_license
|
NiuNiu-jupiter/Leetcode
|
2a49a365898ecca393cb1eb53a47f4501b25952d
|
e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86
|
refs/heads/master
| 2022-11-22T01:05:57.417538
| 2020-07-28T23:34:39
| 2020-07-28T23:34:39
| 182,104,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
"""
Given an array A of integers, return the number of non-empty continuous subarrays that satisfy the following condition:
The leftmost element of the subarray is not larger than other elements in the subarray.
Example 1:
Input: [1,4,2,5,3]
Output: 11
Explanation: There are 11 valid subarrays: [1],[4],[2],[5],[3],[1,4],[2,5],[1,4,2],[2,5,3],[1,4,2,5],[1,4,2,5,3].
Example 2:
Input: [3,2,1]
Output: 3
Explanation: The 3 valid subarrays are: [3],[2],[1].
Example 3:
Input: [2,2,2]
Output: 6
Explanation: There are 6 valid subarrays: [2],[2],[2],[2,2],[2,2],[2,2,2].
"""
def validSubarrays(nums: List[int]) -> int:
    """Count non-empty contiguous subarrays whose leftmost element is not
    larger than any other element in the subarray.

    Monotonic-stack approach, O(n): the stack holds the left-end values of
    all subarrays that are still valid at the current position.

    Bug fix: the empty-input guard returned ``[]`` although the function
    returns an int count; it now returns 0.
    """
    if not nums:
        return 0
    res = 0
    stack = []  # strictly popped when a smaller element arrives
    for a in nums:
        # Left ends larger than `a` cannot extend across it — drop them.
        while stack and stack[-1] > a:
            stack.pop()
        stack.append(a)
        # Every remaining left end starts a valid subarray ending at `a`.
        res += len(stack)
    return res
|
[
"cmyumo.zhang@gmail.com"
] |
cmyumo.zhang@gmail.com
|
a1acccb1aba90199654cacf3ead931973c054ceb
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/api_lib/run/task.py
|
c288e4e773ffe47bea82d77e5052543b63594cc1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wraps a Cloud Run Task message with convenience methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
from googlecloudsdk.api_lib.run import container_resource
from googlecloudsdk.api_lib.run import k8s_object
from googlecloudsdk.core.console import console_attr
# Annotation key recording who created the task.
AUTHOR_ANNOTATION = k8s_object.RUN_GROUP + '/creator'
# Condition names and label keys used on Cloud Run task resources.
STARTED_CONDITION = 'Started'
COMPLETED_CONDITION = 'Completed'
EXECUTION_LABEL = 'run.googleapis.com/execution'
STATE_LABEL = 'run.googleapis.com/runningState'
class RestartPolicy(enum.Enum):
  """Restart policy values ('Never' / 'OnFailure') for a task's containers."""
  NEVER = 'Never'
  ON_FAILURE = 'OnFailure'
class Task(container_resource.ContainerResource):
  """Wraps a Cloud Run Task message, making fields more convenient."""
  # (Docstring said "Execution"; KIND below and the module docstring both
  # identify this as the Task message wrapper.)

  API_CATEGORY = 'run.googleapis.com'
  KIND = 'Task'
  READY_CONDITION = COMPLETED_CONDITION
  TERMINAL_CONDITIONS = frozenset({STARTED_CONDITION, READY_CONDITION})

  @classmethod
  def New(cls, client, namespace):
    """Produces a new Task object.

    Args:
      client: The Cloud Run API client.
      namespace: str, The serving namespace.

    Returns:
      A new Task object.
    """
    ret = super(Task, cls).New(client, namespace)
    # Start with a single empty container for callers to populate.
    ret.spec.template.spec.containers = [client.MESSAGES_MODULE.Container()]
    return ret

  @property
  def author(self):
    """Value of the creator annotation, or None if unset."""
    return self.annotations.get(AUTHOR_ANNOTATION)

  @property
  def index(self):
    """Task index from status; 0 when unset."""
    return self.status.index or 0

  @property
  def execution_name(self):
    """Value of the execution label (KeyError if the label is missing)."""
    return self.labels[EXECUTION_LABEL]

  @property
  def running_state(self):
    """Value of the running-state label, or None when absent."""
    return self.labels[STATE_LABEL] if STATE_LABEL in self.labels else None

  @property
  def service_account(self):
    """The service account to use as the container identity."""
    return self.spec.serviceAccountName

  def ReadySymbolAndColor(self):
    """Return a tuple of ready_symbol and display color for this object."""
    encoding = console_attr.GetConsoleAttr().GetEncoding()
    # Map the running state to a (symbol, color) pair; unknown/unset states
    # fall through to a yellow dot.
    if self.running_state == 'Running':
      return self._PickSymbol('\N{HORIZONTAL ELLIPSIS}', '.',
                              encoding), 'yellow'
    elif self.running_state == 'Succeeded':
      return self._PickSymbol('\N{HEAVY CHECK MARK}', '+', encoding), 'green'
    elif self.running_state == 'Failed':
      return 'X', 'red'
    elif self.running_state == 'Cancelled':
      return '!', 'yellow'
    elif self.running_state == 'Abandoned':
      return '-', 'yellow'
    return '.', 'yellow'

  @property
  def start_time(self):
    """The status startTime field."""
    return self.status.startTime

  @property
  def completion_time(self):
    """The status completionTime field."""
    return self.status.completionTime

  @property
  def retries(self):
    """Retry count so far (0 when unset); None before the task has started."""
    if self.status.startTime is not None:
      return self.status.retried or 0
    return None

  @property
  def last_exit_code(self):
    """Exit code of the last attempt; 0 when completed with no recorded
    result; None when neither a last-attempt result nor a completion time
    is present."""
    if (self.status.lastAttemptResult is not None and
        self.status.lastAttemptResult.exitCode is not None):
      return self.status.lastAttemptResult.exitCode
    elif self.status.completionTime is not None:
      return 0
    return None

  @property
  def last_exit_message(self):
    """Status message of the last attempt, or '' when unavailable."""
    if (self.status.lastAttemptResult is not None and
        self.status.lastAttemptResult.status.message is not None):
      return self.status.lastAttemptResult.status.message
    return ''
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
5079e05b86e2723e8d0e70be3749a7efa59a9183
|
d10a1814735fa6e7fc9354bad8d8251eb81fa9fc
|
/core/decorators.py
|
c9c25be29165176c68365e2285ab55993572af80
|
[] |
no_license
|
SeedyROM/django-social-spotify-example
|
15d6a43045009e0f28e49f4f832bb0b1b1bbae51
|
adb6cc9cfda6d76d45ef9c3611cacfb17ba89831
|
refs/heads/master
| 2022-12-14T21:49:43.512965
| 2018-03-28T08:40:01
| 2018-03-28T08:40:01
| 127,084,250
| 3
| 0
| null | 2022-12-08T00:55:24
| 2018-03-28T04:36:03
|
Python
|
UTF-8
|
Python
| false
| false
| 696
|
py
|
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from requests.exceptions import HTTPError
from social_django.utils import load_strategy
def spotify_view(function):
    """Decorate a Django view so it receives a fresh Spotify access token.

    The wrapped view is invoked as ``function(request, token, *args,
    **kwargs)`` behind ``login_required``. If using the token raises
    ``HTTPError``, the user is redirected to the 'logout' view.

    Improvement: use ``functools.wraps`` instead of manually copying
    ``__doc__``/``__name__`` — the manual copy loses ``__module__``,
    ``__qualname__`` and ``__wrapped__``.
    """
    from functools import wraps  # local import: module import block untouched

    @login_required
    @wraps(function)
    def wrap(request, *args, **kwargs):
        social = request.user.social_auth.get(provider='spotify')
        token = social.get_access_token(load_strategy())
        try:
            return function(request, token, *args, **kwargs)
        except HTTPError as e:
            print(f'Failed using token because of HTTPError: "{e}"')
            return redirect('logout')
    return wrap
|
[
"rallokkcaz@gmail.com"
] |
rallokkcaz@gmail.com
|
c005b4665be718a64a4934ba60988c4d6d45bf34
|
c4d56a69bea9daecab4a6d6dcf64ea40d22eb48e
|
/mitmproxy/tools/console/eventlog.py
|
0b8a3f8cf9167e612d17ad473ade11e91456e26b
|
[
"MIT"
] |
permissive
|
iBrandon/mitmproxy
|
981f44baa8c6ea7cfddafeb38bcf93a853b4c682
|
cafa094f75732bd803972aecb71e2d1032ee2390
|
refs/heads/master
| 2021-01-20T17:29:44.817486
| 2017-05-10T08:11:27
| 2017-05-10T08:11:27
| 90,879,892
| 2
| 0
| null | 2017-05-10T15:28:43
| 2017-05-10T15:28:43
| null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
import urwid
from mitmproxy.tools.console import signals
# Maximum number of retained event-log entries; older entries are dropped.
EVENTLOG_SIZE = 10000
class LogBufferWalker(urwid.SimpleListWalker):
    """List walker backing the event log (plain SimpleListWalker alias)."""
    pass
class EventLog(urwid.ListBox):
    """Scrolling list of event-log lines, capped at EVENTLOG_SIZE entries."""
    keyctx = "eventlog"
    def __init__(self, master):
        self.walker = LogBufferWalker([])
        self.master = master
        urwid.ListBox.__init__(self, self.walker)
        # New log entries arrive via the add-log signal.
        signals.sig_add_log.connect(self.sig_add_log)
    def set_focus(self, index):
        # Silently ignore out-of-range focus requests instead of raising.
        if 0 <= index < len(self.walker):
            super().set_focus(index)
    def keypress(self, size, key):
        # z: clear all events; m_end / m_start: jump to last / first entry.
        if key == "z":
            self.master.clear_events()
            key = None
        elif key == "m_end":
            self.set_focus(len(self.walker) - 1)
        elif key == "m_start":
            self.set_focus(0)
        return urwid.ListBox.keypress(self, size, key)
    def sig_add_log(self, sender, e, level):
        """Append one "level: message" line; error/warn get level styling."""
        txt = "%s: %s" % (level, str(e))
        if level in ("error", "warn"):
            e = urwid.Text((level, txt))
        else:
            e = urwid.Text(txt)
        self.walker.append(e)
        # Drop the oldest entry once the buffer exceeds its cap.
        if len(self.walker) > EVENTLOG_SIZE:
            self.walker.pop(0)
        # Optionally keep the view scrolled to the newest entry.
        if self.master.options.console_focus_follow:
            self.walker.set_focus(len(self.walker) - 1)
    def clear_events(self):
        # Empty the backing walker in place.
        self.walker[:] = []
|
[
"aldo@nullcube.com"
] |
aldo@nullcube.com
|
ec1825cba9d2657ee0ecdc2ebb87aed9c258df64
|
cdad738a7085a997b5349a94aedb4db8da78da8f
|
/TreeProduction/test/crab/w01_hijing8tev_gensimtreeproduction/crab.py
|
c8cf7f7e5455d3f3de7b81e0aec48396bdacec2b
|
[
"MIT"
] |
permissive
|
tuos/DirectLoopAnalysis
|
4851d122d4723e498705c1d2cb100cbf3eda8d43
|
6f5f02538454d2240d0232665b9b17d07eb79854
|
refs/heads/master
| 2020-06-12T22:24:01.081755
| 2020-01-21T17:49:37
| 2020-01-21T17:49:37
| 194,446,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 submission config for the w01 HIJING 8.16 TeV GEN-SIM tree production.
config = config()
config.General.requestName = 'w01_hijing8tev_gensimtreeproduction'
config.General.workArea = 'project_w01_hijing8tev_gensimtreeproduction'
# Transfer job outputs to storage, but skip the log files.
config.General.transferOutputs = True
config.General.transferLogs = False
# Analysis job driven by the local CMSSW parameter-set file.
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'ConfFile_cfg.py'
config.Data.inputDataset = '/HIJING_pPb_8160_DataBS/pPb816Summer16DR-MB_80X_mcRun2_pA_v4-v2/AODSIM'
config.Data.inputDBS = 'global'
#config.Data.splitting = 'FileBased'
# Let CRAB decide the job splitting automatically.
config.Data.splitting = 'Automatic'
#config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/user/tuos/loops/cumulants/hijing/w01_hijing8tev_gensimtreeproduction'
# Do not publish the output dataset in DBS.
config.Data.publication = False
config.Data.outputDatasetTag = 'w01_hijing8tev_gensimtreeproduction'
config.Site.storageSite = 'T2_US_Vanderbilt'
|
[
"shengquan.tuo@cern.ch"
] |
shengquan.tuo@cern.ch
|
4f532fb82b968462c6b2cba3a5fdbc06a4fd47c7
|
c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79
|
/features/himario/mmediting/mmedit/datasets/generation_paired_dataset.py
|
2df184d567d46c30260f0faf1c1112ad072dd09d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
obarnard99/vilio
|
275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b
|
77aac226c3a0910410f11a5999f8908181f57ccd
|
refs/heads/master
| 2023-06-29T17:02:02.282457
| 2021-06-22T09:50:11
| 2021-06-22T09:50:11
| 337,738,373
| 0
| 0
|
MIT
| 2021-06-22T09:50:12
| 2021-02-10T13:50:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,466
|
py
|
import os.path as osp
from .base_generation_dataset import BaseGenerationDataset
from .registry import DATASETS
@DATASETS.register_module()
class GenerationPairedDataset(BaseGenerationDataset):
    """Paired image folder dataset for image-to-image generation.

    Expects 'train' images under '/path/to/data/train' and test images under
    '/path/to/data/test', where '/path/to/data' is given by ``dataroot``.
    Each sample is a single image holding the pair concatenated along the
    width dimension (A|B).

    Args:
        dataroot (str | :obj:`Path`): Root folder of the paired images.
        pipeline (List[dict | callable]): Sequence of data transformations.
        test_mode (bool): Set ``True`` when building the test dataset.
            Default: ``False``.
    """

    def __init__(self, dataroot, pipeline, test_mode=False):
        super(GenerationPairedDataset, self).__init__(pipeline, test_mode)
        # Pick the split subdirectory according to the mode.
        subdir = 'test' if test_mode else 'train'
        self.dataroot = osp.join(str(dataroot), subdir)
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Collect paired image paths.

        Returns:
            list[dict]: One ``dict(pair_path=...)`` per image, sorted by path.
        """
        return [
            dict(pair_path=path)
            for path in sorted(self.scan_folder(self.dataroot))
        ]
|
[
"obarnard99@gmail.com"
] |
obarnard99@gmail.com
|
08f69a4436ba0b5d7c98506b57dd7c74f16f4402
|
caa7c209acd1b336fcd6c0f3d9e8a58ba1eb60ad
|
/test_task1.py
|
4d95408c2199cab426d5994c5865354a8c64792e
|
[] |
no_license
|
herzenuni/sem5-firsttask-04092018-arinasaf11-2
|
b60375e511206aac94d0253ae69de6b957a9ffa2
|
10b3cc83ae8bce745e624229592667a1b18c9724
|
refs/heads/master
| 2021-07-25T14:11:26.582542
| 2018-12-28T13:09:23
| 2018-12-28T13:09:23
| 147,555,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
import unittest
import task1
class test_task(unittest.TestCase):
    """Tests for task1.func.

    BUG FIX: the class previously did not inherit unittest.TestCase, so
    unittest.main() discovered no tests and the methods below never ran.
    """
    def test_func(self):
        # With an explicit 'hex' base the result is a hex string.
        self.assertEqual(task1.func(2,'hex'), '0x2')
    def test_func1(self):
        # Default base: the number is spelled out (Russian word for "two").
        self.assertEqual(task1.func(2), 'два')
if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
herzenuni.noreply@github.com
|
891b10c729cc41d184af202fe27ee44fb33c93fb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_hungers.py
|
83a72b5e0211718be8c579621609825ad0454cb4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
#calss header
class _HUNGERS():
def __init__(self,):
self.name = "HUNGERS"
self.definitions = hunger
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['hunger']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
88482394f3d8b2feadd61f7632ec800377781b29
|
068d271e241d8cdb46dbf4243166e4b8ee7025b2
|
/day08/day8/16.logging模块.py
|
4c9f15064ad8ca6221e2e72af4b717640d5d9fb1
|
[] |
no_license
|
caiqinxiong/python
|
f6e226e76cb62aac970bcfbcb6c8adfc64858b60
|
9029f6c528d2cb742b600af224e803baa74cbe6a
|
refs/heads/master
| 2023-05-26T19:41:34.911885
| 2020-05-15T09:02:08
| 2020-05-15T09:02:08
| 195,261,757
| 1
| 0
| null | 2021-06-10T23:33:33
| 2019-07-04T15:01:42
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# Basic-configuration example (kept for reference):
# import logging
# fh = logging.FileHandler(filename='xxx.log',encoding='utf-8')
# fh1 = logging.FileHandler(filename='xxx2.log',encoding='utf-8')
# sh = logging.StreamHandler()
# logging.basicConfig(level=logging.INFO,
#                     handlers=[fh,sh,fh1],
#                     datefmt='%Y-%m-%d %H:%M:%S',
#                     format='%(asctime)s - %(name)s[%(lineno)d] - %(levelname)s -%(module)s: %(message)s')
# logging.debug('debug message')     # mildest level
# logging.info('info message')       # informational log
# logging.warning('warning message')
# logging.error('error message')
# logging.critical('critical message')
# logging defines 5 levels; by default only WARNING and above are shown.
import logging
from logging import handlers
sh = logging.StreamHandler()
# Size-based rotation: at most 1024 bytes per file, keep 5 backups.
rh = handlers.RotatingFileHandler('myapp.log', maxBytes=1024, backupCount=5)
# Time-based rotation: roll over to a new file every 5 seconds.
fh = handlers.TimedRotatingFileHandler(filename='myapp2.log', when='s', interval=5, encoding='utf-8')
logging.basicConfig(level=logging.INFO,
                    handlers=[rh, fh, sh],
                    datefmt='%Y-%m-%d %H:%M:%S',
                    format='%(asctime)s - %(name)s[%(lineno)d] - %(levelname)s -%(module)s: %(message)s')
# Emit records forever so the rotation behavior can be observed.
while True:
    # BUG FIX: logging.WARNING is the integer level constant and is not
    # callable; logging.warning() is the function that emits a record.
    logging.warning('')
|
[
"13269469526@163.com"
] |
13269469526@163.com
|
c3e48a7b3486a9f541e42d8e360ef80d57c5f287
|
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
|
/pinpoint_write_f/recommender-configuration_create.py
|
bf2bef53865f2a8a45fe748c41e6cf0bbc15a86e
|
[] |
no_license
|
lxtxl/aws_cli
|
c31fc994c9a4296d6bac851e680d5adbf7e93481
|
aaf35df1b7509abf5601d3f09ff1fece482facda
|
refs/heads/master
| 2023-02-06T09:00:33.088379
| 2020-12-27T13:38:45
| 2020-12-27T13:38:45
| 318,686,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Generated helper: interactively collects parameters for the AWS CLI
# `pinpoint create-recommender-configuration` command via write_parameter.
import os
import sys
# Make the repository root importable so `common` resolves when run directly.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    delete-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/delete-recommender-configuration.html
    get-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/get-recommender-configuration.html
    get-recommender-configurations : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/get-recommender-configurations.html
    update-recommender-configuration : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/pinpoint/update-recommender-configuration.html
    """
    write_parameter("pinpoint", "create-recommender-configuration")
|
[
"hcseo77@gmail.com"
] |
hcseo77@gmail.com
|
a4ea05beb61a1ae9488673785bc21a36590eeb5d
|
4569d707a4942d3451f3bbcfebaa8011cc5a128d
|
/hierwikiplugin/0.9/hierwiki/macros/parentwiki.py
|
aa746ceb1c51e30ceb2e29e717c30465cb294e13
|
[] |
no_license
|
woochica/trachacks
|
28749b924c897747faa411876a3739edaed4cff4
|
4fcd4aeba81d734654f5d9ec524218b91d54a0e1
|
refs/heads/master
| 2021-05-30T02:27:50.209657
| 2013-05-24T17:31:23
| 2013-05-24T17:31:23
| 13,418,837
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
# Macros for the HierWiki plugin
from trac.core import *
from trac.wiki.api import IWikiMacroProvider, WikiSystem
from StringIO import StringIO
import re, string, inspect
class ParentWikiMacro(Component):
    """
    Inserts a link to the "parent" wiki entry.
    This only applies to wikis that have a "/" in their name indicating heirarchy.
    e.g. an entry named Java/Introduction will have a parent of Java. All other wiki entries have a parent of WikiStart.
    """
    # TODO: Everything until render_macro can be removed once switched to be based on WikiMacroBase
    implements(IWikiMacroProvider)

    def get_macros(self):
        """Yield the name of the macro based on the class name."""
        name = self.__class__.__name__
        if name.endswith('Macro'):
            name = name[:-5]
        yield name

    def get_macro_description(self, name):
        """Return the subclass's docstring."""
        return inspect.getdoc(self.__class__)

    def render_macro(self, req, name, args):
        """Render an HTML link to the parent of `args` (or the current page).

        FIX: removed the unused `db`/`cursor` acquisition (a needless DB
        connection per render) and made the regex a raw string — `"\\S"` in a
        plain literal is an invalid escape sequence.
        """
        buf = StringIO()
        # An explicit page name may be passed as the macro argument; otherwise
        # fall back to the page currently being rendered (plus a trailing '/').
        if args:
            prefix = args.replace('\'', '\'\'')
        else:
            prefix = req.hdf.getValue('wiki.page_name', '') + '/'
        # Pages without a '/' in the name are rooted at WikiStart.
        parent = 'WikiStart'
        m = re.search(r"(\S+)/(\S+)$", prefix)
        if m:
            parent = m.group(1)
        buf.write('<a href="%s">' % self.env.href.wiki(parent))
        buf.write(parent)
        buf.write('</a>\n')
        return buf.getvalue()
|
[
"coderanger@7322e99d-02ea-0310-aa39-e9a107903beb"
] |
coderanger@7322e99d-02ea-0310-aa39-e9a107903beb
|
d3b1d9bcf01a6956cb9f8162f90e476652811962
|
4b4828d3c98d76d7bf38f90a015945acc408ddc5
|
/PythonAI/Source/W1D4/src/EX_REQ/ex_req_png.py
|
13d7a84e8fda327fcf1724af0cd5c4b314c0726e
|
[] |
no_license
|
Huh-jae-won/Study
|
cb5d32728e8dcded492e7edb054b500c91ec607c
|
e4dbc3fef69bb273b62b866fb5ef2a7250222f10
|
refs/heads/main
| 2023-06-20T13:06:26.691899
| 2021-07-11T07:43:41
| 2021-07-11T07:43:41
| 362,759,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
# Module loading ---------------------------------------
import requests

# Constants --------------------------------------------
URL = 'http://wikibook.co.kr/logo.png'
IMG_FILE = '../../data/test.png'

# Fetch the data ---------------------------------------
# FIX: added a timeout so a dead host cannot hang the script forever.
res = requests.get(URL, timeout=10)
# FIX: status_code is already an int — no int() conversion needed.
if res.status_code == 200:
    # Save the payload in binary mode.
    with open(IMG_FILE, "wb") as f:
        f.write(res.content)
    print("saved")
else:
    print("ERROR : ", res.status_code)
|
[
"dfr9034@naver.com"
] |
dfr9034@naver.com
|
b658f62dc2ae780047fb588ecb481e20ae1822d1
|
9b3f578e63a7e17e2b1bab5f38aa8625b8a80251
|
/descarteslabs/client/services/tasks/tests/data/dl_test_package/package/module.py
|
281e2167df79dcb724e1afd4151237c60c5b0182
|
[
"Apache-2.0"
] |
permissive
|
carderne/descarteslabs-python
|
e6f7000f08cd1569e0ddd0f7fb8e53abb6765183
|
757b480efb8d58474a3bf07f1dbd90652b46ed64
|
refs/heads/master
| 2022-12-09T23:19:02.361226
| 2020-08-13T11:52:30
| 2020-08-13T11:52:30
| 287,264,851
| 0
| 0
|
NOASSERTION
| 2020-08-13T11:46:58
| 2020-08-13T11:46:57
| null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
# NOTE(review): this module lives under tests/data — it appears to be a
# fixture exercising globals, module-level lookups, and nested classes;
# confirm before changing any code shapes here.
a_global = "A global var"
def foo():
    print("foo")
def func_foo():
    # Combines a local with the module-level global.
    a_local = "A local var"
    return a_local + a_global
class outer_class:
    class inner_class:
        @staticmethod
        def func_bar():
            # Same pattern as func_foo, but nested two classes deep.
            a_local = "A local var"
            return a_local + a_global
|
[
"support@descarteslabs.com"
] |
support@descarteslabs.com
|
7c50534c50b9a795e870dc44b83879518e77b022
|
f8f40422b6da71206bd45cb395761b2b56150b73
|
/virl/cli/logs/commands.py
|
96d8b20b45417e3518261d827d1d3b814dbf1f88
|
[
"MIT"
] |
permissive
|
RunSi/virlutils
|
3bb96d8a805ad884578c967c480dc51f98a4cbab
|
595bae19ea23ba589e7883bedd2076c40bfc4907
|
refs/heads/master
| 2021-01-25T13:36:53.273146
| 2017-12-15T20:06:42
| 2017-12-15T20:06:42
| 123,597,468
| 0
| 0
|
MIT
| 2018-03-02T15:42:22
| 2018-03-02T15:42:22
| null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
import click
from virl.api import VIRLServer
from virl.cli.views import log_table
from virl import helpers
@click.command()
@click.argument('env', default='default')
def logs(env, **kwargs):
    """
    Retrieves log information for the provided simulation
    """
    # Resolve the running simulation for this environment (falsy when absent).
    running = helpers.check_sim_running(env)
    if not running:
        # FIX: removed the duplicated word ("for for") in the error message.
        click.secho("could not find logs for env: {}".format(env), fg='red')
        return
    sim_name = running
    server = VIRLServer()
    resp = server.get_logs(sim_name)
    # Render the simulation's event list as a table.
    log_table(resp.json()['events'])
|
[
"kecorbin@cisco.com"
] |
kecorbin@cisco.com
|
59bf04653400bc0082de29089c4bffcf7a9921fa
|
528f910908885c3ded4ecc6380b9603c8dcacbd6
|
/tbapi/top/api/rest/FenxiaoProductSkuUpdateRequest.py
|
c720f4a5fcd2ed51753fc1fa937f037da27ea87c
|
[] |
no_license
|
Monica-ckd/data007
|
15fe9c4c898a51a58100138b6b064211199d2ed1
|
0e54ae57eb719b86ec14ce9f77b027882a3398a8
|
refs/heads/master
| 2023-03-16T05:26:14.257318
| 2016-05-25T06:57:05
| 2016-05-25T06:57:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
'''
Created by auto_sdk on 2013-04-01 16:44:41
'''
from top.api.base import RestApi
class FenxiaoProductSkuUpdateRequest(RestApi):
    """TOP request wrapper for the taobao.fenxiao.product.sku.update API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Request parameters; all start unset until the caller assigns them.
        for field in ('agent_cost_price', 'dealer_cost_price', 'product_id',
                      'properties', 'quantity', 'sku_number', 'standard_price'):
            setattr(self, field, None)

    def getapiname(self):
        return 'taobao.fenxiao.product.sku.update'
|
[
"root@u16392468.onlinehome-server.com"
] |
root@u16392468.onlinehome-server.com
|
e200015486e71bc146de42be55e36a0a0cb55b0c
|
94a6a83c8bd3f9a951ee7d48973f35d0b5b6f99c
|
/testcases/dev/GlobalSettings_dev.py
|
dee52c16156534ae6bd5896371cc00e4e651c91d
|
[] |
no_license
|
JerryLiu0821/apython
|
19766bebd5365e53aa7ea46adc01132045e91f9c
|
d9804b1099c879da1f8dc130fb205ab191f65fb1
|
refs/heads/master
| 2020-05-17T05:09:15.319167
| 2015-08-17T10:50:09
| 2015-08-17T10:50:09
| 40,886,032
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,079
|
py
|
# -*- coding: utf-8 -*-
'''
Created on Mar 27, 2013
@author: liujian
'''
import unittest
import re, time, commands
import sys
sys.path.append('../testcases')
import Stability
class TestGlobalSettings(unittest.TestCase):
    # UI-automation tests (Python 2) driving a TV device's global settings
    # menus through the Stability helpers; self.a is the connected device.
    def setUp(self):
        # NOTE(review): if device setup fails before self.a is assigned, the
        # logging call in the handler will itself raise — confirm intended.
        try :
            self.error = ''
            self.setup = Stability.SetupDeviceConnections()
            self.a = self.setup.initializeTestDevice()
            self.stabdl = Stability.StabDL(self.a)
            self.a.input.back(3)
        except Exception, e :
            self.a.log.debug("", "\n Set up")
    def tearDown(self):
        # Back out of any menus the test left open.
        self.a.input.back(3)
    def test3D(self):
        """打开关闭3D|在设置中打开关闭3D"""
        # (Open/close 3D mode from the settings menu.)
        try:
            #self.launchLetv()
            self.a.input.home()
            time.sleep(3)
            self.a.input.home()
            time.sleep(3)
            self.a.input.back()
            time.sleep(2)
            # keyevent 176 — presumably the Android settings key; confirm.
            self.a.device.sh('input keyevent 176')
            self.a.input.left(8)
            self.a.input.right(2)
            self.a.input.center()
            w = self.a.ui.screen()
            if 'mode_msg' not in str(w.ids()):
                self.error = 'cannot open 3d mode in settings'
                raise Exception
            # Cycle through the 3D modes, checking health after each toggle.
            for i in range(3):
                self.a.input.right()
                self.a.input.center()
                if not self.isOK():
                    raise Exception
            self.a.input.left(3)
            self.a.input.center()
        except Exception, e :
            self.a.log.debug("", "\n test3D")
            self.fail("Error happened: %s %s" % ( self.error, e))
    def testMiracast(self):
        """Miracast打开关闭|在设置中打开关闭Miracast"""
        # (Toggle Miracast on/off from the settings menu.)
        try:
            self.a.device.sh('input keyevent 176')
            self.a.input.left(8)
            self.a.input.right(4)
            time.sleep(2)
            self.a.input.center()
            w = self.a.ui.screen()
            if 'miracast_switch' not in str(w.ids()):
                self.error = 'cannot open miracast mode in settings'
                raise Exception
            self.a.input.down()
            # Toggle the switch repeatedly; '\u5173\u95ed' is "off" in Chinese.
            for i in range(6):
                if '\u5173\u95ed' in str(w.texts()):
                    print 'open miracast'
                else:
                    print 'close miracast'
                self.a.input.center()
                time.sleep(10)
                if not self.isOK():
                    raise Exception
                w = self.a.ui.screen()
            self.a.input.back()
        except Exception, e :
            self.a.log.debug("", "\n test3D")
            self.fail("Error happened: %s %s" % ( self.error, e))
    def _testInstallApks(self):
        """安装外部应用|安装多个外部应用"""
        # (Install several external APKs.) Disabled via the leading underscore.
        # NOTE(review): `os` is never imported in this file and `self.id` is
        # unittest's method — this test would fail if re-enabled; confirm.
        try:
            apksp = '../testcases/setup/apks/'
            apks = commands.getoutput("ls %s" %apksp).split('\n')
            for apk in apks:
                os.system("adb -s %s install %s/%s" %(self.id, apksp, apk))
        except Exception, e :
            self.a.log.debug("", "\n testInstallApks")
            self.fail("Error happened: %s %s" % ( self.error, e))
    def launchLetv(self):
        # Navigate from an arbitrary screen to the Letv signal-source app.
        for i in range(3):
            self.a.input.home()
            time.sleep(5)
            self.a.input.back(2)
            time.sleep(2)
        for i in range(5):
            if 'com.letv.signalsourcemanager/com.letv.signalsourcemanager.MainActivity' not in str(self.a.ui.window()):
                self.a.input.home()
                time.sleep(2)
                self.a.input.left()
                time.sleep(1)
                self.a.input.center()
            else:
                break
        self.a.input.home()
        time.sleep(2)
        self.a.input.right(2)
        self.a.input.center()
    def isOK(self):
        # Health probe: returns False on a Force Close / ANR / app-error
        # dialog (after dismissing it), True otherwise.
        try:
            widgets = self.a.ui.waitfor(
                anyof=[
                    self.a.ui.widgetspec(id='message'),
                    self.a.ui.widgetspec(text='Wait')])
            if widgets == 'message':
                self.a.input.down()
                self.a.input.center()
                self.error = "Force Closed"
                return False
            if widgets == 'Wait':
                self.a.input.down()
                self.a.input.right()
                self.a.input.center()
                self.error = "ANR Happened"
                return False
            """if widgets == idname:
                self.error="Exit Without any prompt message"
                return False"""
            self.a.log.debug("", "No Force Closed & ANR happen!")
            if 'Application Error' in str(self.a.ui.windows()):
                self.a.input.right()
                self.a.input.center()
                return False
            return True
        except:
            # Treat probe failures (no dialog appeared) as healthy.
            return True
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
[
"liujian@letv.com"
] |
liujian@letv.com
|
68fbfc30ae113b14e8e307ec4775137c6e47de5d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/97/usersdata/206/56394/submittedfiles/lecker.py
|
6a4f92871ebc5f5c9f874df4bd40bc0b6ea4393a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 956
|
py
|
# -*- coding: utf-8 -*-
# Reads two lists of n floats and prints 'S' when the list has exactly one
# "peak" (element greater than its neighbours), 'N' otherwise.
n = int(input('Digite N:'))
a = []
b = []
contA = 0
contB = 0
# Read the n values of list A.
for z in range(1, n + 1, 1):
    valorA = float(input('valor da lista A:'))
    a.append(valorA)
# Count peaks in A.
for i in range(0, len(a), 1):
    if(i == 0):
        # NOTE(review): compares a[0] with a[-1] (the last element), while the
        # B loop below compares with the next element — confirm intended.
        if (a[i] > a[i-1]):
            contA = contA + 1
    elif (i == len(a) - 1):
        if (a[i] > a[i-1]):
            contA = contA + 1
    else:
        if(a[i] > a[i+1] and a[i] > a[i-1]):
            contA = contA + 1
# Read the n values of list B.
for z in range(1, n + 1, 1):
    valorB = float(input('Valor da lista B:'))
    b.append(valorB)
# Count peaks in B.
for i in range(0, len(b), 1):
    if(i == 0):
        if (b[i] > b[i+1]):
            contB = contB + 1
    elif(i == len(b) - 1):
        if(b[i] > b[i-1]):
            contB = contB + 1
    else:
        # FIX: syntax error 'b{i-1]' -> 'b[i-1]'.
        if(b[i] > b[i+1] and b[i] > b[i-1]):
            contB = contB + 1
if(contA == 1):
    print('S')
else:
    # FIX: typo 'prin' -> 'print'.
    print('N')
if(contB == 1):
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
726e3445787acda675e18981a98aa2e53e15c3ab
|
8de847f626ffb6b11e49bec669cb80304a66a0af
|
/plugins/dbnd-snowflake/src/dbnd_snowflake/snowflake_resources.py
|
0a37250ef330c5add20ed3cf14ca8b0387d6b7b7
|
[
"Apache-2.0"
] |
permissive
|
FHoffmannCode/dbnd
|
5ac7d766ec1bfe37f7a12605ebd12b4dcf31fba6
|
82beee1a8c752235bf21b4b0ceace5ab25410e52
|
refs/heads/master
| 2022-12-26T06:04:30.008949
| 2020-10-04T19:30:58
| 2020-10-04T19:30:58
| 301,370,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,981
|
py
|
import logging
from decimal import Decimal
from textwrap import dedent
from dbnd import log_duration, log_metrics
from dbnd_snowflake.snowflake_values import SnowflakeController
logger = logging.getLogger(__name__)
# TODO: Add support for QUERY_TAG
# I.e. Subclass SnowflakeOperator and set session param QUERY_TAG to "dbnd.{dag/task_name/task_id}"
# Then use pass this QUERY_TAG to UI for easier navigation between
# See https://community.snowflake.com/s/article/How-We-Controlled-and-Reduced-Snowflake-Compute-Cost
# https://github.com/snowflakedb/snowflake-connector-python/issues/203
def log_snowflake_resource_usage(
    query_text, database, user, connection_string, session_id=None
):
    """
    get and log cpu time, run time, disk read, and processed rows.
    connection or connection_string is required. supports only psycopg2 connections.
    """
    try:
        # Time the lookup itself so its overhead is also reported.
        with log_duration("log_snowflake_resource_usage__time_seconds", "system"):
            _log_snowflake_resource_usage(
                query_text, database, user, connection_string, session_id
            )
    except Exception:
        # Resource logging is best-effort: log (without the password) and
        # swallow, so a metrics failure never breaks the caller.
        conn_without_pass = _censor_password(connection_string)
        logger.exception(
            # FIX: message said "redshift" (copy-paste from the redshift
            # plugin); this is the snowflake path.
            "Failed to log_snowflake_resource_usage (query_text=%s, connection_string=%s)",
            query_text,
            conn_without_pass,
        )
def _log_snowflake_resource_usage(
    query_text, database, user, connection_string, session_id=None,
):
    # Look the query up in Snowflake's QUERY_HISTORY table function (last 15
    # minutes) and report its resource metrics via log_metrics.
    # Quick and dirty way to handle optional clause element.
    # Might be better to use SQLAlchemy expression language here
    if session_id:
        # NOTE(review): the SQL has a single '{}' placeholder, so the second
        # .format() argument (session_id) is ignored here; session_id is bound
        # as a %s query parameter instead — confirm intended.
        query_history = dedent(
            """\
            select *
            from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
            where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s) and session_id=%s
            order by start_time desc limit 1;"""
        ).format(database, session_id)
        query_params = (query_text, user, session_id)
    else:
        query_history = dedent(
            """\
            select *
            from table({}.information_schema.query_history(dateadd('minutes',-15,current_timestamp()),current_timestamp()))
            where LOWER(query_text)=LOWER(%s) and LOWER(user_name)=LOWER(%s)
            order by start_time desc limit 1;"""
        ).format(database)
        query_params = (query_text, user)
    result = _connect_and_query(connection_string, query_history, *query_params)
    if not result:
        # No matching history row: emit a warning metric instead of failing.
        logger.info(
            "resource metrics were not found for query '%s', query_params=%s",
            query_text,
            query_params,
        )
        log_metrics(
            {
                "snowflake_query_warning": "No resources info found",
                "snowflake_query_text": query_text,
            },
            source="system",
        )
        return
    metrics = result[0]
    # Metric key prefers the user-set QUERY_TAG, falling back to QUERY_ID.
    key = "snowflake_query_{}".format(
        metrics["QUERY_TAG"] if metrics["QUERY_TAG"] else metrics["QUERY_ID"]
    )
    # Map Snowflake history columns to the metric names shown in the UI.
    snowflake_metric_to_ui_name = {
        "BYTES_SCANNED": "bytes_scanned",
        "COMPILATION_TIME": "compilation_time_milliseconds",
        "CREDITS_USED_CLOUD_SERVICES": "credits_used_cloud_services",
        "EXECUTION_TIME": "execution_time_milliseconds",
        "QUERY_TEXT": "query_text",
        "ROWS_PRODUCED": "rows_produced",
        "TOTAL_ELAPSED_TIME": "total_elapsed_time_milliseconds",
    }
    metrics_to_log = {}
    for metric, ui_name in snowflake_metric_to_ui_name.items():
        if metric in metrics:
            value = metrics[metric]
            # Quick hack to track decimal values. probably should be handled on a serialization level
            if isinstance(value, Decimal):
                value = float(value)
            metrics_to_log[key + "." + ui_name] = value
    log_metrics(metrics_to_log, source="system")
def _connect_and_query(connection_string, query, *params):
    """Open a SnowflakeController for `connection_string` and run `query`.

    Returns the query result, or None (after logging an error) when no
    connection string was provided.
    """
    if connection_string is None:
        logger.error(
            # FIX: message said "redshift" (copy-paste); this module queries
            # snowflake.
            "connection and connection string are None, one of them is required to query snowflake"
        )
        return
    with SnowflakeController(connection_string) as snowflake:
        return snowflake._query(query, params)
def _censor_password(connection_string):
"""
example connection string:
postgres://user:password@host.com:5439/dev
returns:
postgres://user:*****@host.com:5439/dev
"""
if (not connection_string) or ("@" not in connection_string):
return connection_string
split1 = connection_string.split("@")
split2 = split1[0].split(":")
if len(split2) != 3:
return connection_string
split2[-1] = "*****"
split2_join = ":".join(split2)
split1[0] = split2_join
split1_join = "@".join(split1)
return split1_join
|
[
"evgeny.shulman@databand.ai"
] |
evgeny.shulman@databand.ai
|
0b2ee115102da0dff844ffdbfff0f1445e2b6017
|
66fe6eb64afeb7313a4c7685a8748455325b6726
|
/1329-sort-the-matrix-diagonally.py
|
b944795e759d4d78649c943f42f1017f27392e8e
|
[] |
no_license
|
anantkaushik/leetcode
|
b54eb27b17ed95b02ab426392208c346f2d87aaa
|
06f0a6dbff2e2062fa4568efa5f01ad982d6ac94
|
refs/heads/master
| 2022-03-07T18:21:35.881943
| 2022-02-23T12:27:24
| 2022-02-23T12:27:24
| 120,501,367
| 40
| 13
| null | 2019-10-11T11:07:22
| 2018-02-06T18:05:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
"""
Problem Link: https://leetcode.com/problems/sort-the-matrix-diagonally/
A matrix diagonal is a diagonal line of cells starting from some cell in either the topmost row or leftmost column and
going in the bottom-right direction until reaching the matrix's end. For example, the matrix diagonal starting from mat[2][0],
where mat is a 6 x 3 matrix, includes cells mat[2][0], mat[3][1], and mat[4][2].
Given an m x n matrix mat of integers, sort each matrix diagonal in ascending order and return the resulting matrix.
Example 1:
Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]
Constraints:
m == mat.length
n == mat[i].length
1 <= m, n <= 100
1 <= mat[i][j] <= 100
"""
class Solution:
    def diagonalSort(self, mat: List[List[int]]) -> List[List[int]]:
        """Sort each top-left -> bottom-right diagonal of `mat` ascending.

        Cells (r, c) on the same diagonal share the key r - c. The matrix is
        mutated in place and also returned.
        """
        # Bucket every value by its diagonal key.
        diag = collections.defaultdict(list)
        for r, row in enumerate(mat):
            for c, value in enumerate(row):
                diag[r - c].append(value)
        # One ascending iterator per diagonal.
        feeders = {key: iter(sorted(vals)) for key, vals in diag.items()}
        # Row-major refill walks each diagonal top-to-bottom, i.e. ascending.
        for r, row in enumerate(mat):
            for c in range(len(row)):
                mat[r][c] = next(feeders[r - c])
        return mat
|
[
"anant.kaushik2@gmail.com"
] |
anant.kaushik2@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.