blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35b078652ae742c7fdce54d3d18a3085c0dfb8ae
|
85671094e4f1c1f221ff078faea3ee9f93795b57
|
/examples/dfp/v201311/contact_service/update_contacts.py
|
e80bc0cd172f861db7dc1944c336f0e6b5f71d55
|
[
"Apache-2.0"
] |
permissive
|
jdilallo/jdilallo-test
|
63631c96c8070c60ce7c07512aa51f370d8fbadf
|
8fb9bf43e7c99d5cb198c5587897f8b2514ca4c0
|
refs/heads/master
| 2020-05-18T15:35:37.264949
| 2014-03-04T18:19:31
| 2014-03-04T18:19:31
| 14,376,457
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,282
|
py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
Tags: ContactService.updateContacts
ContactService.getContactsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate classes from the client library.
from googleads import dfp
# Set the ID of the contact to update.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
  """Update the address of a single DFP contact.

  Args:
    client: an initialized dfp.DfpClient used to obtain services.
    contact_id: ID of the contact to update (bound to :id in the query).
  """
  # Initialize appropriate service.
  contact_service = client.GetService('ContactService', version='v201311')
  # Create statement object to select the single contact by ID.
  values = [{
      'key': 'id',
      'value': {
          'xsi_type': 'NumberValue',
          'value': contact_id
      }
  }]
  query = 'WHERE id = :id'
  # Limit of 1: a unique ID selects at most one contact.
  statement = dfp.FilterStatement(query, values, 1)
  # Get contacts by statement.
  response = contact_service.getContactsByStatement(
      statement.ToStatement())
  if 'results' in response:
    updated_contacts = []
    for contact in response['results']:
      # Overwrite the address on every returned contact.
      contact['address'] = '123 New Street, New York, NY, 10011'
      updated_contacts.append(contact)
    # Update the contact on the server.
    contacts = contact_service.updateContacts(updated_contacts)
    # Display results.
    for contact in contacts:
      print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
              'was updated.')
             % (contact['id'], contact['name'], contact['address']))
  else:
    # NOTE: Python 2 print statement — this example targets Python 2.
    print 'No contacts found to update.'
if __name__ == '__main__':
  # Initialize client object from the local googleads storage file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, CONTACT_ID)
|
[
"api.jdilallo@gmail.com"
] |
api.jdilallo@gmail.com
|
5cb2f0cea82e8f0c529c8cc80985dc43f49abcf5
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/001_classes/_exercises/exercises/ITDN Python RUS/001_Vvedenie v OOP/05-__init__.py
|
807756be14dc81c13918a5fd56093bd1ff2c31ee
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 897
|
py
|
# -*- coding: utf-8 -*-
# Начальное состояние объекта следует создавать в
# специальном методе-конструкторе __init__, который
# вызывается автоматически после создания экземпляра
# класса. Его параметры указываются при создании
# объекта.
# Класс, описывающий человека
class Person:
    """A person with a name and an age; initial state is set by __init__."""

    def __init__(self, name, age):
        # Constructor: runs automatically after the instance is created;
        # its arguments are supplied at object-creation time.
        self.name, self.age = name, age

    def print_info(self):
        # Method carried over from the previous example.
        print(self.name, 'is', self.age)
# Create class instances; constructor arguments supply the initial state.
alex = Person('Alex', 18)
john = Person('John', 20)
# Call the print_info method on each instance.
alex.print_info()
john.print_info()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
ed33ebf9caedf48ca19abe67d46daec2605f4000
|
3d57578801ffdcfeb09c6b3551a1611b9b28b55d
|
/cosmeticsyou/accounts/migrations/0003_auto_20180316_2301.py
|
f8b52532db847607a49cc367e6aea0ba8c4f4d66
|
[] |
no_license
|
Wishez/cosmeticsyou
|
a97f01054c40a9305f7b0274f59812278d3ac593
|
c44c177fd3ea52a25003916be9eb49cbeabcbdea
|
refs/heads/master
| 2021-01-18T03:45:11.902558
| 2018-03-16T21:23:24
| 2018-03-16T21:23:24
| 85,789,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,791
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-16 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11.4).

    Alters the same five address/passport fields identically on three
    models (RefferalConsultant, RefferalConsultantTableRelations, User),
    making each field optional (blank=True, null=True).
    """

    dependencies = [
        ('accounts', '0002_auto_20180316_2201'),
    ]

    operations = [
        migrations.AlterField(
            model_name='refferalconsultant',
            name='num_apartment',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
        ),
        migrations.AlterField(
            model_name='refferalconsultant',
            name='num_home',
            field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
        ),
        migrations.AlterField(
            model_name='refferalconsultant',
            name='passport_data',
            field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
        ),
        migrations.AlterField(
            model_name='refferalconsultant',
            name='region',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
        ),
        migrations.AlterField(
            model_name='refferalconsultant',
            name='street',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
        ),
        migrations.AlterField(
            model_name='refferalconsultanttablerelations',
            name='num_apartment',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
        ),
        migrations.AlterField(
            model_name='refferalconsultanttablerelations',
            name='num_home',
            field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
        ),
        migrations.AlterField(
            model_name='refferalconsultanttablerelations',
            name='passport_data',
            field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
        ),
        migrations.AlterField(
            model_name='refferalconsultanttablerelations',
            name='region',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
        ),
        migrations.AlterField(
            model_name='refferalconsultanttablerelations',
            name='street',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
        ),
        migrations.AlterField(
            model_name='user',
            name='num_apartment',
            field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
        ),
        migrations.AlterField(
            model_name='user',
            name='num_home',
            field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
        ),
        migrations.AlterField(
            model_name='user',
            name='passport_data',
            field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
        ),
        migrations.AlterField(
            model_name='user',
            name='region',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
        ),
        migrations.AlterField(
            model_name='user',
            name='street',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
        ),
    ]
|
[
"shiningfinger@list.ru"
] |
shiningfinger@list.ru
|
59553b07726a1959d3ea724fc29aed34185217ec
|
439c87c48c6c2c812d1faca73cbf1b632e9403dc
|
/DAYS/Day4/Remove_Empty_List.py
|
17625cb9e9a3ed34eaa82a6e3d2c8f455ce5c064
|
[
"MIT"
] |
permissive
|
saubhagyav/100_Days_Code_Challenge
|
14ca8db68e09c7ac7741f164fea8b62cb36bf2c0
|
bde41126b9342eacc488c79d01dc4b76a3651c93
|
refs/heads/main
| 2023-08-05T03:12:18.918079
| 2021-09-12T12:20:41
| 2021-09-12T12:20:41
| 389,375,066
| 2
| 2
| null | 2021-07-25T15:06:18
| 2021-07-25T15:06:17
| null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
# Approach 1:
def Remove_Empty_List(Given_List):
    """Return a copy of Given_List with every empty-list element removed."""
    kept = []
    for item in Given_List:
        # Only elements equal to the empty list are dropped; other falsy
        # values (0, '', None) are kept.
        if item != []:
            kept.append(item)
    return kept
Given_List = [5, 6, [], 7, 8, 9, [], 12, [], 4,[]]
print(Remove_Empty_List(Given_List))
# Approach 2:
Given_List = [5, 6, [], 7, 8, 9, [], 12, [], 4, []]
# NOTE(review): filter(None, ...) drops ALL falsy elements (0, '', None),
# not only empty lists — it matches Approach 1 here only because the sample
# data contains no other falsy values.
result = list(filter(None, Given_List))
print(result)
|
[
"noreply@github.com"
] |
saubhagyav.noreply@github.com
|
3971a5173cfd3587c142695cad852f22cb9cf9bd
|
060660439d4a54dfa74368c03968bee684d74930
|
/planscore/website/__init__.py
|
886e658812f37ac2edad725ac38e5a73f0f32e4d
|
[] |
no_license
|
dheerajchand/PlanScore
|
d0829e22dd1bfd20bbec58d900c4fdfaed8a0ebc
|
39b8a173f3a7f9b97db8d138e9e757bb23a0b204
|
refs/heads/master
| 2020-12-02T18:17:23.747720
| 2017-06-11T04:20:54
| 2017-06-11T04:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,393
|
py
|
import flask, os, urllib.parse, markdown
from .. import data, score

# Directory holding one subdirectory per scoring model (each with a README.md).
MODELS_BASEDIR = os.path.join(os.path.dirname(__file__), 'models')

app = flask.Flask(__name__)
# S3 bucket and API base URL are overridable via environment variables.
app.config['PLANSCORE_S3_BUCKET'] = os.environ.get('S3_BUCKET', 'planscore')
app.config['PLANSCORE_API_BASE'] = os.environ.get('API_BASE', 'https://api.planscore.org/')
def get_data_url_pattern(bucket):
    """Return the S3 URL pattern for a plan's upload-index object."""
    template = 'https://{}.s3.amazonaws.com/{}'
    return template.format(bucket, data.UPLOAD_INDEX_KEY)
def get_geom_url_pattern(bucket):
    """Return the S3 URL pattern for a plan's upload-geometry object."""
    template = 'https://{}.s3.amazonaws.com/{}'
    return template.format(bucket, data.UPLOAD_GEOMETRY_KEY)
@app.route('/')
def get_index():
    # Landing page: static template, no view data.
    return flask.render_template('index.html')
@app.route('/upload.html')
def get_upload():
    # Build the absolute URL of the API's upload endpoint from the
    # configured API base, then hand it to the template.
    planscore_api_base = flask.current_app.config['PLANSCORE_API_BASE']
    upload_fields_url = urllib.parse.urljoin(planscore_api_base, 'upload')
    return flask.render_template('upload.html', upload_fields_url=upload_fields_url)
@app.route('/plan.html')
def get_plan():
    # URL patterns let the client locate a plan's index and geometry
    # objects in the configured S3 bucket.
    data_url_pattern = get_data_url_pattern(flask.current_app.config['PLANSCORE_S3_BUCKET'])
    geom_url_pattern = get_geom_url_pattern(flask.current_app.config['PLANSCORE_S3_BUCKET'])
    return flask.render_template('plan.html', fields=score.FIELD_NAMES,
                                 data_url_pattern=data_url_pattern, geom_url_pattern=geom_url_pattern)
@app.route('/models/')
def get_models():
    # A model is any subdirectory (searched recursively) of MODELS_BASEDIR
    # containing a README.md; it is listed by its path relative to the base.
    model_names = list()
    for (base, _, files) in os.walk(MODELS_BASEDIR):
        if 'README.md' in files:
            model_names.append(os.path.relpath(base, MODELS_BASEDIR))
    return flask.render_template('models.html', models=model_names)
@app.route('/models/<name>/')
def get_model(name):
    # Render one model's README (markdown -> HTML) plus a listing of its
    # other files, relative to the model directory.
    model_basedir = os.path.join(MODELS_BASEDIR, name)
    with open(os.path.join(model_basedir, 'README.md')) as file:
        model_readme = markdown.markdown(file.read())
    model_files = list()
    for (base, _, files) in os.walk(model_basedir):
        model_files.extend([
            os.path.relpath(os.path.join(base, file), model_basedir)
            for file in files if file != 'README.md'])
    return flask.render_template('model.html', name=name,
                                 readme=model_readme, files=model_files)
@app.route('/models/<name>/<path:path>')
def get_model_file(name, path):
    # Serve an individual file from a model's directory.
    dirname, filename = os.path.split(os.path.join(MODELS_BASEDIR, name, path))
    return flask.send_from_directory(dirname, filename)
|
[
"mike@teczno.com"
] |
mike@teczno.com
|
6cc768c2c0b18a6842f42aa80378fb57bbb8607e
|
b7a8d04f9fd88d66ef6d8b83a449105ae31698a4
|
/setup.py
|
090be41ad536ceb7af61d21bd576ec733d48b86c
|
[] |
no_license
|
jjkas/eels-analysis-old
|
d5ce3dbb55ed84921abfcb2476243b6783ab5d52
|
98a2fc8e394060d53f982427dd953b31d56a90fa
|
refs/heads/master
| 2020-09-13T12:39:58.384098
| 2020-01-08T00:13:44
| 2020-01-08T00:13:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 963
|
py
|
# -*- coding: utf-8 -*-
"""
To upload to PyPI, PyPI test, or a local server:
python setup.py bdist_wheel upload -r <server_identifier>
"""
import setuptools
import os

# NOTE(review): `os` is imported but never used in this file.

# Package metadata for the nionswift-eels-analysis distribution.
setuptools.setup(
    name="nionswift-eels-analysis",
    version="0.4.4",
    author="Nion Software",
    author_email="swift@nion.com",
    description="Library and UI for doing EELS analysis with Nion Swift.",
    long_description=open("README.rst").read(),
    url="https://github.com/nion-software/eels-analysis",
    packages=["nion.eels_analysis", "nion.eels_analysis.test", "nionswift_plugin.nion_eels_analysis", "nionswift_plugin.nion_eels_analysis.test"],
    package_data={"nion.eels_analysis": ["resources/*"]},
    install_requires=["nionswift>=0.14.0"],
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Programming Language :: Python :: 3.6",
    ],
    include_package_data=True,
    test_suite="nion.eels_analysis.test",
    python_requires='~=3.6',
)
|
[
"cmeyer1969@gmail.com"
] |
cmeyer1969@gmail.com
|
b5bcc647a60de463a5d2a205fe7a95114861f91d
|
7bd3c35070d40724ab21e83b4d3f5ba39e455818
|
/signup/sign_up/views.py
|
f540eb11011cd77dde5d49812f013f0b1351bbb0
|
[] |
no_license
|
shatishdesai202/Django-Project-Practice
|
9433004de6fd72dd0cd56cb4ff7770ecded6a054
|
f66ee507fcf959d148627c1c2f5d587b10adc996
|
refs/heads/main
| 2023-03-12T17:14:15.823285
| 2021-03-07T05:32:07
| 2021-03-07T05:32:07
| 345,263,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from .forms import SignupForm
# Create your views here.
def sign(request):
    """Render the signup page; create a user on a valid POST submission."""
    if request.method == "POST":
        form = SignupForm(request.POST)
        if form.is_valid():
            form.save()
            # Present a fresh, empty form after a successful signup.
            # NOTE(review): redirecting after a successful POST
            # (Post/Redirect/Get) would prevent duplicate submissions
            # on browser refresh.
            form = SignupForm()
    else:
        form = SignupForm()
    context = {'form':form}
    return render(request, 'base.html', context)
|
[
"sdondjango@gmail.com"
] |
sdondjango@gmail.com
|
0c22e89f87f706b7c281d9efdf6c8fb932fb7278
|
7259dbcc9e32502945d362caa43d4ad380cd04ea
|
/Login_Pingtai_Code/Login_Pingtai_Code/spiders/zujuan.py
|
5df1c61f584726695543507746195569516801d9
|
[
"MIT"
] |
permissive
|
Doraying1230/Python-Study
|
daa143c133262f4305624d180b38205afe241163
|
8dccfa2108002d18251053147ccf36551d90c22b
|
refs/heads/master
| 2020-03-29T13:46:13.061373
| 2018-07-26T15:19:32
| 2018-07-26T15:19:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,032
|
py
|
'''
Created on 2017年10月27日
@author: deppon
'''
from scrapy.spiders import CrawlSpider
import scrapy
from scrapy.http import Request,FormRequest
class ZuJuanSpider(CrawlSpider):
    """Spider that logs in to passport.zujuan.com and inspects session cookies.

    Flow: request the login page (cookiejar on) -> POST the login form with
    the CSRF token -> fetch a post-login URL and dump its cookies/body.
    """

    name = "ZuJuanSpider"
    # Hard-coded test credentials for the login form.
    account = '13653978879'
    pwd = '123456'
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
        'Host':'passport.zujuan.com',
        'Origin':'http://passport.zujuan.com',
        # NOTE(review): these two entries contain literal spaces and are
        # almost certainly meant to be 'X-Requested-With': 'XMLHttpRequest'
        # and 'Connection': 'keep-alive' — confirm before relying on them.
        'X - Requested - With': 'XMLHttpRequest',
        'Connection': 'keep - alive'
    }

    def __init__(self, *a, **kw):
        super(ZuJuanSpider, self).__init__(*a, **kw)
        # Runtime configuration (login/target URLs) arrives via the 'meta'
        # spider argument as a string and is eval'd here.
        # NOTE(review): eval on a command-line argument is unsafe; prefer
        # json.loads or ast.literal_eval.
        self.meta = eval(kw['meta'])

    def start_requests(self):
        """Request the login page first, with cookiejar enabled so the site's
        cookies are captured; parse() handles the response."""
        return [Request(url=self.meta['login_url'],meta={'cookiejar':1},callback=self.parse,headers=self.headers)]

    def parse(self, response):
        # Extract the CSRF token embedded in the login form.
        _csrf = self.extract_from_xpath(response, "//input[@name='_csrf']/@value")
        print('_csrf ===',_csrf)
        formdata={'_csrf': _csrf,
                  'LoginForm[username]': self.account,
                  'LoginForm[password]': self.pwd,
                  'LoginForm[rememberMe]':'0'}
        # Response cookies: what the backend set in the browser on the
        # first visit to the login page.
        Cookie1 = response.headers.getlist('Set-Cookie')
        print("响应Cookie ====",Cookie1)
        print('登录中')
        # (Bare string below, kept verbatim: the second request is a form
        # POST carrying cookie, user agent and credentials to authorize
        # the session.)
        """第二次用表单post请求,携带Cookie、浏览器代理、用户登录信息,进行登录给Cookie授权"""
        return [FormRequest.from_response(response,
                                          url= self.meta['login_post_url'],  # actual POST endpoint
                                          meta={'cookiejar':response.meta['cookiejar']},
                                          formdata=formdata,
                                          callback=self.after_login
                                          )]

    def after_login(self,response):
        # Logged in; fetch a page that requires the authorized session.
        yield scrapy.Request(url=self.meta['login_sucess_url'],callback=self.get_json_data,meta=response.meta)

    def get_json_data(self,response):
        # Request cookies: what the client sent after login.
        Cookie2 = response.request.headers.getlist('Cookie')
        print("登陸成功後 cookie =====",Cookie2)
        a = response.body.decode("utf-8")
        print("登录后响应信息 ====",a)

    def extract_from_xpath(self, response, xpath, return_first=True, return_selector=False, embedded_content=False):
        # XPath helper: optionally return the raw selector, only the first
        # match, or the string content of embedded nodes.
        if return_selector:
            return response.xpath(xpath)
        else:
            if return_first:
                if embedded_content:
                    return response.xpath(xpath).xpath('string(.)').extract()
                return response.xpath(xpath).extract_first()
            return response.xpath(xpath).extract()
|
[
"2829969299@qq.com"
] |
2829969299@qq.com
|
112a8a4a05140b04fe14ae7faff078f55f0b9100
|
3eda7828febd06dc5173db03a5c9191a60f44c65
|
/boyue_index.py
|
cbe30449aa4b9c9a7da9dec85ede29e53906042d
|
[] |
no_license
|
xzk-seu/Auto_home_spider
|
2dd95fdc35177b1ab5050d8efbd811a51328a570
|
d2016cc2de6d214097210e50755819ee5e4ea342
|
refs/heads/master
| 2020-04-11T06:40:13.405381
| 2019-10-10T08:53:04
| 2019-10-10T08:53:04
| 161,587,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
import json
import os
from multiprocessing import Pool
from get_index import get_index
def write_result(page):
    """Fetch forum index page `page` and save the result as <page>.json.

    Pages whose output file already exists and is non-empty are skipped,
    so an interrupted crawl can be resumed.
    """
    path = os.path.join(os.getcwd(), 'index', 'boyue')
    if not os.path.exists(path):
        os.makedirs(path)
    file_path = os.path.join(path, str(page)+'.json')
    if os.path.exists(file_path) and os.path.getsize(file_path) != 0:
        print('file: %d is existing!' % page)
        return
    # Forum listing URL for board c-3788; %d is the page number.
    url = 'https://club.autohome.com.cn/bbs/forum-c-3788-%d.html'
    temp = get_index(url % page)
    with open(file_path, 'w') as fw:
        json.dump(temp, fw)
    print('PAGE: %d done!' % page)
def safe_write(page):
    """Run write_result(page), reporting any exception instead of raising.

    Used as a multiprocessing pool task, where an uncaught exception would
    be lost inside the worker; errors are printed and swallowed on purpose.
    """
    try:
        write_result(page)
    except Exception as e:
        # Fixed typo in the log message ('safe_wire' -> 'safe_write').
        print('in safe_write: %s' % e)
def get_boyue_index(page_limit):
    """Crawl pages 1..page_limit concurrently with a pool of 8 workers."""
    pool = Pool(8)
    for i in range(1, page_limit + 1):
        # safe_write swallows per-page errors so one failure doesn't
        # kill the pool.
        pool.apply_async(safe_write, args=(i,))
    pool.close()
    pool.join()

if __name__ == '__main__':
    get_boyue_index(1000)
|
[
"1399350807@qq.com"
] |
1399350807@qq.com
|
0988de8091599db64228d0877e95f48dc311de48
|
45dd427ec7450d2fac6fe2454f54a130b509b634
|
/lecture_10/direct_needle_patch.py
|
851a9e8f3135d0940606f49e288d151b46cfc7cf
|
[] |
no_license
|
weka511/smac
|
702fe183e3e73889ec663bc1d75bcac07ebb94b5
|
0b257092ff68058fda1d152d5ea8050feeab6fe2
|
refs/heads/master
| 2022-07-02T14:24:26.370766
| 2022-06-13T00:07:36
| 2022-06-13T00:07:36
| 33,011,960
| 22
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from random import uniform
from math import sqrt

# Direct-sampling Monte Carlo for the needle problem:
# needle length a, distance between lines b.
a = 0.6
b = 1.0
n_hits = 0
n_steps = 1000000
for n in range(n_steps):
    # Distance of the needle's center from the nearest line (by symmetry,
    # only [0, b/2] is needed).
    x_center = uniform(0.0, b * 0.5)
    # Rejection-sample a random direction: draw points in the unit square
    # until one lands inside the quarter disc, giving a uniform angle.
    while True:
        dx = uniform(0.0, 1.0)
        dy = uniform(0.0, 1.0)
        rad = sqrt(dx ** 2 + dy ** 2)
        if rad <= 1.0: break
    # x-coordinate of the needle tip; crossing a line (x_tip < 0) is a hit.
    x_tip = x_center - a * 0.5 * dx / rad
    if x_tip < 0.0: n_hits += 1
# Estimator 2*a*n_steps/(n_hits*b) — for Buffon's needle this converges
# to pi as n_steps grows.
print (a * 2.0 * n_steps / float(n_hits) / b)
|
[
"simon@greenweaves.nz"
] |
simon@greenweaves.nz
|
f8f9fb74ddf71f3055a2aa88c2b4744aad2d2cfa
|
8cc30a27835e205a3476783106ca1605a6a85c48
|
/amy/autoemails/tests/test_admin_cancel.py
|
b31f65598da6d8bf05f56b27fdfde600d4560103
|
[
"MIT"
] |
permissive
|
gaybro8777/amy
|
d968edc78bbd3f63f3353450334721628dbbc0f4
|
3cf99aed58a0f0acf83d2645a30d8408208ccea9
|
refs/heads/develop
| 2023-03-07T22:08:28.692700
| 2021-02-23T18:06:06
| 2021-02-23T18:06:06
| 341,930,505
| 0
| 0
|
MIT
| 2021-02-24T17:22:08
| 2021-02-24T14:40:43
| null |
UTF-8
|
Python
| false
| false
| 7,731
|
py
|
from datetime import timedelta
from django.test import TestCase
from django.urls import reverse
from rq import Queue
from rq.exceptions import NoSuchJobError
from autoemails import admin
from autoemails.models import EmailTemplate, Trigger, RQJob
from autoemails.job import Job
from autoemails.tests.base import FakeRedisTestCaseMixin, dummy_job
from workshops.tests.base import SuperuserMixin
class TestAdminJobCancel(SuperuserMixin, FakeRedisTestCaseMixin, TestCase):
    """Tests for the admin 'cancel RQJob' view against jobs in every state:
    missing from Redis, already executed, scheduled, started, and other
    statuses. The admin module's scheduler is swapped for the fake-Redis
    scheduler supplied by FakeRedisTestCaseMixin for the duration of each
    test."""

    def setUp(self):
        super().setUp()
        self._setUpSuperuser()  # creates self.admin
        # save scheduler and connection data
        self._saved_scheduler = admin.scheduler
        # overwrite
        admin.scheduler = self.scheduler
        # fake RQJob
        self.email = EmailTemplate.objects.create(slug="test-1")
        self.trigger = Trigger.objects.create(action="new-instructor",
                                              template=self.email)
        self.rqjob = RQJob.objects.create(job_id="fake-id",
                                          trigger=self.trigger)

    def tearDown(self):
        super().tearDown()
        # bring back saved scheduler
        admin.scheduler = self._saved_scheduler

    def test_view_doesnt_allow_GET(self):
        # log admin user
        self._logSuperuserIn()
        url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
        rv = self.client.get(url)
        self.assertEqual(rv.status_code, 405)  # Method not allowed

    def test_view_access_by_anonymous(self):
        url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
        rv = self.client.post(url)
        self.assertEqual(rv.status_code, 302)
        # cannot check by assertRedirect because there's additional `?next`
        # parameter
        self.assertTrue(rv.url.startswith(reverse('login')))

    def test_view_access_by_admin(self):
        # log admin user
        self._logSuperuserIn()
        # try accessing the view again
        url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
        rv = self.client.post(url)
        self.assertEqual(rv.status_code, 302)
        self.assertRedirects(rv, reverse('admin:autoemails_rqjob_preview',
                                         args=[self.rqjob.pk]))

    def test_no_such_job(self):
        # log admin user
        self._logSuperuserIn()
        # self.rqjob's job_id ("fake-id") was never enqueued, so Redis has
        # no corresponding job.
        with self.assertRaises(NoSuchJobError):
            Job.fetch(self.rqjob.job_id, connection=self.scheduler.connection)
        url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
        rv = self.client.post(url, follow=True)
        self.assertIn(
            'The corresponding job in Redis was probably already executed',
            rv.content.decode('utf-8'),
        )

    def test_job_executed(self):
        """Ensure executed job is discovered."""
        # log admin user
        self._logSuperuserIn()
        # enqueue and then create an RQJob
        job = self.queue.enqueue(dummy_job)
        rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
        Job.fetch(job.id, connection=self.scheduler.connection)  # no error
        with self.connection.pipeline() as pipe:
            pipe.watch(self.scheduler.scheduled_jobs_key)
            # no jobs in scheduler
            self.assertIsNone(
                pipe.zscore(
                    self.scheduler.scheduled_jobs_key, job.id
                )
            )
        url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
        rv = self.client.post(url, follow=True)
        self.assertIn(
            'Job has unknown status or was already executed.',
            rv.content.decode('utf-8'),
        )

    def test_enqueued_job_cancelled(self):
        """Ensure enqueued job is successfully cancelled."""
        # log admin user
        self._logSuperuserIn()
        # enqueue a job to run in future
        job = self.scheduler.enqueue_in(
            timedelta(minutes=5),
            dummy_job,
        )
        rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
        # fetch job data
        job = Job.fetch(rqjob.job_id, connection=self.scheduler.connection)
        # `None` status is characteristic to scheduler-queued jobs.
        # Jobs added to the queue without scheduler will have different
        # status.
        self.assertEqual(job.get_status(), None)
        # the job is in scheduler's queue
        with self.connection.pipeline() as pipe:
            pipe.watch(self.scheduler.scheduled_jobs_key)
            # job in scheduler
            self.assertIsNotNone(
                pipe.zscore(
                    self.scheduler.scheduled_jobs_key, job.id
                )
            )
        # cancel the job
        url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
        rv = self.client.post(url, follow=True)
        self.assertIn(
            f'The job {rqjob.job_id} was cancelled.',
            rv.content.decode('utf-8'),
        )
        # the job is no longer in scheduler's queue
        with self.connection.pipeline() as pipe:
            pipe.watch(self.scheduler.scheduled_jobs_key)
            # job in scheduler
            self.assertIsNone(
                pipe.zscore(
                    self.scheduler.scheduled_jobs_key, job.id
                )
            )
        # job status updated
        rqjob.refresh_from_db()
        self.assertEqual(rqjob.status, "cancelled")
        # job data still available
        Job.fetch(rqjob.job_id, connection=self.scheduler.connection)
        # ...but nothing is scheduled
        self.assertEqual(self.scheduler.count(), 0)

    def test_running_job_cancelled(self):
        """Ensure running job is not cancelled."""
        # Create an asynchronous queue.
        # The name `separate_queue` used here is to ensure the queue isn't
        # used anywhere else.
        queue = Queue('separate_queue', connection=self.connection)
        # log admin user
        self._logSuperuserIn()
        # add job to the queue
        job = queue.enqueue(dummy_job)
        self.assertEqual(job.get_status(), 'queued')
        # log the job in our system as RQJob
        rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
        # force the job status to be "started"
        job.set_status('started')
        self.assertTrue(job.is_started)
        url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
        rv = self.client.post(url, follow=True)
        self.assertIn(
            f'Job {rqjob.job_id} has started and cannot be cancelled.',
            rv.content.decode('utf-8'),
        )

    def test_other_status_job(self):
        """Ensure jobs with other statuses are handled."""
        # Create an asynchronous queue.
        # The name `separate_queue` used here is to ensure the queue isn't
        # used anywhere else.
        queue = Queue('separate_queue', connection=self.connection)
        # log admin user
        self._logSuperuserIn()
        # add job to the queue
        job = queue.enqueue(dummy_job)
        self.assertEqual(job.get_status(), 'queued')
        # log the job in our system as RQJob
        rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
        # force the job status to be "deferred" (could be something else,
        # except for "started" and "queued")
        job.set_status('deferred')
        self.assertTrue(job.is_deferred)
        url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
        rv = self.client.post(url, follow=True)
        self.assertIn(
            'Job has unknown status or was already executed.',
            rv.content.decode('utf-8'),
        )
|
[
"piotr@banaszkiewicz.org"
] |
piotr@banaszkiewicz.org
|
f4d666432c4c4b022a452ca50ccd90fffad423ab
|
df3b60c38d22497f3169375491a278255209615b
|
/mqtt/cloud/testmqtt.py
|
facb8807f7c49de5770a9876ed3a8536ccfccb9b
|
[] |
no_license
|
juanengml/Sistema_de_Monitoramento_de_Salas_GPIOT-UTFPR-TD
|
2c3c8d67ce8aa555eb07233ba52411cd1314c488
|
23f20e865910f48b0074a35f95ebfae5e6cbbd92
|
refs/heads/master
| 2022-12-24T01:21:58.551642
| 2020-01-09T21:30:19
| 2020-01-09T21:30:19
| 149,667,511
| 0
| 2
| null | 2022-12-18T14:40:02
| 2018-09-20T20:35:32
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
import paho.mqtt.client as mqtt
#import RPi.GPIO as GPIO
import json

# ThingsBoard connection settings (placeholders to be filled in by the user).
THINGSBOARD_HOST = 'YOUR_THINGSBOARD_IP_OR_HOSTNAME'
ACCESS_TOKEN = 'RASPBERRY_PI_DEMO_TOKEN'
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, rc, *extra_params):
    print('Connected with result code ' + str(rc))
    # Subscribing to receive RPC requests
    client.subscribe('v1/devices/me/rpc/request/+')
    # Sending current GPIO status
    # NOTE(review): `JSON` is not defined anywhere in this file — this line
    # raises NameError when the callback fires; a serialized GPIO-state
    # payload was presumably intended.
    client.publish('v1/devices/me/attributes', JSON, 1)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    # NOTE: Python 2 print statement — this script targets Python 2.
    print 'Topic: ' + msg.topic + '\nMessage: ' + str(msg.payload)
    # Decode JSON request
    data = json.loads(msg.payload)
    # Check request method
    if data['method'] == 'getGpioStatus':
        # Reply with GPIO status
        # NOTE(review): `JSON` is undefined (see on_connect) — NameError here.
        client.publish(msg.topic.replace('request', 'response'), JSON, 1)
    elif data['method'] == 'setGpioStatus':
        # Update GPIO status and reply
        client.publish(msg.topic.replace('request', 'response'), JSON, 1)
        client.publish('v1/devices/me/attributes', JSON, 1)
client = mqtt.Client()
# Register connect callback
client.on_connect = on_connect
# Register publish message callback
client.on_message = on_message
# Set access token
client.username_pw_set(ACCESS_TOKEN)
# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval
client.connect(THINGSBOARD_HOST, 1883, 60)
try:
    client.loop_forever()
except KeyboardInterrupt:
    # NOTE(review): the GPIO import is commented out above, so this line
    # would raise NameError if reached.
    GPIO.cleanup()
|
[
"juanengml@gmail.com"
] |
juanengml@gmail.com
|
60dc64a27d8279c8669ee5555da915651affded0
|
e9dd4ab2ffd84fa6e5c3c5b097aa2b088860e1ec
|
/btre/urls.py
|
a566737c4b59c0c84b2813dbcd0ef15ad330cf38
|
[] |
no_license
|
AkshayVKumar/btre
|
f8434195080a597f6d3346c3103574f0d8b26de8
|
4276e710d850ae0f552cf2b1312015a196e3b8f4
|
refs/heads/main
| 2023-03-29T04:22:49.478403
| 2021-04-01T18:51:16
| 2021-04-01T18:51:16
| 353,791,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,178
|
py
|
"""btre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
# NOTE(review): the four module imports below are unused — include() takes
# dotted-path strings; 'contacts.urls' is included without a matching
# import, which works for the same reason.
import pages
import listings
import realtors
import accounts

urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('pages.urls')),
    path('listings/',include("listings.urls")),
    path('realtors/',include("realtors.urls")),
    path('accounts/',include("accounts.urls")),
    path('contacts/',include("contacts.urls"))
# Serving media via static() is a development-only convenience.
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
[
"akshayvk64@gmail.com"
] |
akshayvk64@gmail.com
|
2579021a8df011296303f7a5a2c966d2a68c05af
|
a7efc71f80fcf7085d5357c6e41e37a1518413eb
|
/src/sentimental_analysis/preprocess.py
|
2bdfa6f4463419aa94d74d35abf4b3e0fb436ca1
|
[
"MIT"
] |
permissive
|
stormsinbrewing/Real_Time_Social_Media_Mining
|
398ad382567d0d1b39bf2a479cf52933f36009b0
|
86b16f763d1f57c1a1f1a26808d3b36bfa364358
|
refs/heads/master
| 2023-04-23T07:46:01.690005
| 2021-05-03T03:44:18
| 2021-05-03T03:44:18
| 234,296,979
| 25
| 11
|
MIT
| 2023-03-24T23:56:55
| 2020-01-16T10:39:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,708
|
py
|
import re
import sys
from utils import write_status
from nltk.stem.porter import PorterStemmer
def preprocess_word(word):
    """Normalize one token: trim surrounding punctuation, squeeze runs of
    3+ identical characters down to 2, and drop hyphens/apostrophes."""
    stripped = word.strip('\'"?!,.():;')
    # funnnnny --> funny
    squeezed = re.sub(r'(.)\1+', r'\1\1', stripped)
    return re.sub(r'(-|\')', '', squeezed)
def is_valid_word(word):
    """Return True iff the token begins with a letter and contains only
    alphanumerics, dots and underscores."""
    match = re.search(r'^[a-zA-Z][a-z0-9A-Z\._]*$', word)
    return match is not None
def handle_emojis(tweet):
    """Replace common emoticons with ' EMO_POS ' / ' EMO_NEG ' tokens.

    Substitutions are applied in a fixed order so overlapping patterns
    behave exactly as before.
    """
    substitutions = (
        # Smile -- :), : ), :-), (:, ( :, (-:, :')
        (r'(:\s?\)|:-\)|\(\s?:|\(-:|:\'\))', ' EMO_POS '),
        # Laugh -- :D, : D, :-D, xD, x-D, XD, X-D
        (r'(:\s?D|:-D|x-?D|X-?D)', ' EMO_POS '),
        # Love -- <3, :*
        (r'(<3|:\*)', ' EMO_POS '),
        # Wink -- ;-), ;), ;-D, ;D, (;, (-;
        (r'(;-?\)|;-?D|\(-?;)', ' EMO_POS '),
        # Sad -- :-(, : (, :(, ):, )-:
        (r'(:\s?\(|:-\(|\)\s?:|\)-:)', ' EMO_NEG '),
        # Cry -- :,(, :'(, :"(
        (r'(:,\(|:\'\(|:"\()', ' EMO_NEG '),
    )
    for pattern, token in substitutions:
        tweet = re.sub(pattern, token, tweet)
    return tweet
def preprocess_tweet(tweet):
    """Normalize a raw tweet into a cleaned, space-joined token string.

    Relies on the module-level `use_stemmer` flag and, when stemming is
    enabled, the module-level `porter_stemmer` (both set in __main__).
    """
    processed_tweet = []
    # Convert to lower case
    tweet = tweet.lower()
    # Replaces URLs with the word URL
    tweet = re.sub(r'((www\.[\S]+)|(https?://[\S]+))', ' URL ', tweet)
    # Replace @handle with the word USER_MENTION
    tweet = re.sub(r'@[\S]+', 'USER_MENTION', tweet)
    # Replaces #hashtag with hashtag
    tweet = re.sub(r'#(\S+)', r' \1 ', tweet)
    # Remove RT (retweet)
    tweet = re.sub(r'\brt\b', '', tweet)
    # Replace 2+ dots with space
    tweet = re.sub(r'\.{2,}', ' ', tweet)
    # Strip space, " and ' from tweet
    tweet = tweet.strip(' "\'')
    # Replace emojis with either EMO_POS or EMO_NEG
    tweet = handle_emojis(tweet)
    # Replace multiple spaces with a single space
    tweet = re.sub(r'\s+', ' ', tweet)
    words = tweet.split()
    for word in words:
        word = preprocess_word(word)
        if is_valid_word(word):
            if use_stemmer:
                word = str(porter_stemmer.stem(word))
            processed_tweet.append(word)
    return ' '.join(processed_tweet)
def preprocess_csv(csv_file_name, processed_file_name, test_file=False):
    """Preprocess every tweet in a CSV and write '<id>[,label],<clean text>' lines.

    Input rows are parsed positionally around the first commas:
    'id,label,text' for training data, or 'id,text' when test_file is True
    (no label column). Returns the output file name. Python 2 module
    (uses print statements).
    """
    save_to_file = open(processed_file_name, 'w')
    with open(csv_file_name, 'r') as csv:
        lines = csv.readlines()
        total = len(lines)
        for i, line in enumerate(lines):
            tweet_id = line[:line.find(',')]
            if not test_file:
                # Consume the label column before the tweet text.
                line = line[1 + line.find(','):]
                positive = int(line[:line.find(',')])
            line = line[1 + line.find(','):]
            tweet = line
            processed_tweet = preprocess_tweet(tweet)
            if not test_file:
                save_to_file.write('%s,%d,%s\n' %
                                   (tweet_id, positive, processed_tweet))
            else:
                save_to_file.write('%s,%s\n' %
                                   (tweet_id, processed_tweet))
            # Progress indicator from utils.
            write_status(i + 1, total)
    save_to_file.close()
    print '\nSaved processed tweets to: %s' % processed_file_name
    return processed_file_name
if __name__ == '__main__':
    # CLI: python preprocess.py <raw-CSV>
    # Writes <raw>-processed.csv (or -processed-stemmed.csv when stemming).
    if len(sys.argv) != 2:
        print 'Usage: python preprocess.py <raw-CSV>'
        exit()
    use_stemmer = False  # flip to True to enable Porter stemming
    csv_file_name = sys.argv[1]
    processed_file_name = sys.argv[1][:-4] + '-processed.csv'
    if use_stemmer:
        porter_stemmer = PorterStemmer()
        processed_file_name = sys.argv[1][:-4] + '-processed-stemmed.csv'
    preprocess_csv(csv_file_name, processed_file_name, test_file=False)
|
[
"500060720@stu.upes.ac.in"
] |
500060720@stu.upes.ac.in
|
7364facf97bbe797e4688c8529979c572f738f7e
|
0a1f8957a798006deaa53d10d09f733fab1e6b05
|
/src/Python27Packages/PCC/PCC/SRC_regress.py
|
c0cdbdc0e11180870ab974a14325a7d483abc881
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
metamorph-inc/meta-core
|
a89504ccb1ed2f97cc6e792ba52e3a6df349efef
|
bc7a05e04c7901f477fe553c59e478a837116d92
|
refs/heads/master
| 2023-03-07T02:52:57.262506
| 2023-03-01T18:49:49
| 2023-03-01T18:49:49
| 40,361,476
| 25
| 15
|
NOASSERTION
| 2023-01-13T16:54:30
| 2015-08-07T13:21:24
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
from numpy import *
def SRC_regress(X, Y, otpt, N):
# Add a constant term.
X = insert(X,0,1,1) #insert a column of 1's in the 0th column, axis 1
# Find the least squares solution by the use of Matlab backslash operator.
# b is the vector of regression coefficients.
r2=[]
b=zeros((X.shape[1],otpt))
r=zeros((X.shape[1],otpt))
for p in range(otpt):
b[:,p], resid = linalg.lstsq(X, Y[:, p])[:2]
r2.append((1 - resid / (Y[:, p].size * Y[:, p].var()))[0])
r[:, p] = b[:, p] * asarray((std(X,0).T / std(Y[:, p]) ).T)
# [b(:,p),~,~,~,stats] = regress(Y(:,p),X)
# r(:,p) = b(:,p).*std(X)'/std(Y(:,p))
# stat(p)=stats(1)
# "Standardize" the regression coefficients.
# Remove the constant term.
return r[1:], r2
|
[
"kevin.m.smyth@gmail.com"
] |
kevin.m.smyth@gmail.com
|
ab7e8c8da4c8e78c4b5a9b13b5e3a0c286628d78
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/80/usersdata/160/48865/submittedfiles/moedas.py
|
fbd54a4f21eb65c2842ba5b47bd06575b228b31a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
# Change-making: given two coin denominations a and b and a bill value c,
# find quantities qa, qb with qa*a + qb*b == c, maximizing qa first.
# Prints qa and qb on success, or 'N' when no combination exists.
# Python 2 script (uses print statements).
a=int(input('Digite os valores disponíveis das moedas:'))
b=int(input('Digite os valores disponíveis das moedas:'))
c=int(input('Digite a cédula:'))
cont=0  # flag: a valid combination was found
qa=c//a  # start from the largest possible count of coin `a`
qb=0
while qa>=0:
    troca=c-qa*a  # remainder to cover with coin `b`
    if troca%b==0:
        qb=troca//b
        cont=cont+1
        break
    else:
        qa=qa-1
if cont>0:
    print qa
    print qb
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
71c35b3ac6e90b946930f337822763a56891fa6d
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/gui/Scaleform/genConsts/DAMAGE_LOG_SHELL_BG_TYPES.py
|
fbb14d7600d8dbd920244235784e18d9819b1d2f
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 588
|
py
|
# 2017.08.29 21:48:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/DAMAGE_LOG_SHELL_BG_TYPES.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
class DAMAGE_LOG_SHELL_BG_TYPES(object):
    # String identifiers for the shell background style in the damage log
    # Scaleform UI. Generated file -- values must stay in sync with the
    # ActionScript side; do not rename.
    GOLD = 'gold'
    WHITE = 'white'
    EMPTY = 'empty'
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\genConsts\DAMAGE_LOG_SHELL_BG_TYPES.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:48:42 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
a588d3e28d3956602608435b88c2bdcd3d980823
|
4dbd12da17cc45a5482afc8cea02051e798731a9
|
/venv/Scripts/django-admin.py
|
9c4c65edbd574a17c31f9ba77b2beca874b75b41
|
[] |
no_license
|
tsicroxe/django_projects
|
71b9bec6d834f53fde892606799b4bc96ba45a91
|
c11036c78d120e5ffa51055e2999dbe05b0d36eb
|
refs/heads/master
| 2021-01-11T07:03:53.045558
| 2016-12-07T20:46:05
| 2016-12-07T20:46:05
| 71,937,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
#!c:\users\bani_\desktop\codingdojo\djangoprojects\django_test\venv\scripts\python.exe
# Thin virtualenv entry-point stub: forwards command-line arguments to
# Django's management command dispatcher (equivalent to `django-admin`).
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"arbanakus@gmail.com"
] |
arbanakus@gmail.com
|
8ecd5eea7039d1b701170a32d86388d3b651d501
|
68ee9027d4f780e1e5248a661ccf08427ff8d106
|
/extra/unused/aggregate_icesat.py
|
aa9940239df97213a6bb5453b7674695ac207a66
|
[
"MIT"
] |
permissive
|
whyjz/CARST
|
87fb9a6a62d39fd742bb140bddcb95a2c15a144c
|
4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b
|
refs/heads/master
| 2023-05-26T20:27:38.105623
| 2023-04-16T06:34:44
| 2023-04-16T06:34:44
| 58,771,687
| 17
| 4
|
MIT
| 2021-03-10T01:26:04
| 2016-05-13T20:54:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,152
|
py
|
#!/usr/bin/python
# aggregate_icesat.py
# Author: Andrew Kenneth Melkonian
# All rights reserved
def aggregate_icesat(icesat_path, input_dem_xyz_txt_dir, input_dem_xyz_txt_identifier, output_label):
    """Group ICESat elevation points with co-located DEM xyz samples.

    First collects per-coordinate records ('x y elev date unc') from the
    ICESat file, keyed by the coordinate string with trailing zeros trimmed;
    then appends matching samples from every '*<identifier>.txt' file in
    input_dem_xyz_txt_dir (skipping the ICESat file itself). Elevations
    outside (min_elev, max_elev] are discarded. Writes the grouped records
    to '<output_label>.txt', with '>' separating coordinate groups.
    """
    import os;
    assert os.path.exists(icesat_path), "\n***** ERROR: " + icesat_path + " does not exist, exiting...\n";
    assert os.path.exists(input_dem_xyz_txt_dir), "\n***** ERROR: " + input_dem_xyz_txt_dir + " does not exist, exiting...\n";
    # Elevation bounds kept as strings and converted at comparison time.
    max_elev = "1520";
    min_elev = "-100";
    interval = 120.;
    icesat_unc = "0.5";
    coords = {};
    xy = "";
    import re;
    # Pass 1: ICESat points. Columns: x y elev date unc h_ell (elev in col 5).
    infile = open(icesat_path, "r");
    for line in infile:
        elements = line.split();
        if len(elements) > 2 and elements[2].find("NaN") < 0:
            x = elements[0].strip();
            y = elements[1].strip();
            # Trim trailing zeros so coordinates match across files.
            x = x[ : re.search("0*$",x).start(0)];
            y = y[ : re.search("0*$",y).start(0)];
            if float(elements[5]) > float(max_elev):
                continue;
            elif float(elements[5]) <= float(min_elev):
                continue;
            xy = x + " " + y;
            if xy not in coords:
                coords[xy] = "";
            coords[xy] = coords[xy] + xy + " " + elements[2].strip() + " " + elements[3].strip() + " " + elements[4].strip() + "\n";
    infile.close();
    # Pass 2: DEM xyz files matching the identifier (elevation in col 2).
    contents = os.listdir(input_dem_xyz_txt_dir);
    input_dem_xyz_txt_names = [item for item in contents if re.search(".*" + input_dem_xyz_txt_identifier + "\.txt$", item)];
    for item in input_dem_xyz_txt_names:
        # Skip the ICESat file itself if it lives in the same directory.
        if re.search(icesat_path[icesat_path.rfind("/") + 1 : ], input_dem_xyz_txt_dir + "/" + item):
            continue;
        infile = open(input_dem_xyz_txt_dir + "/" + item, "r");
        for line in infile:
            elements = line.split();
            if len(elements) > 2 and elements[2].find("NaN") < 0:
                x = elements[0].strip();
                y = elements[1].strip();
                x = x[ : re.search("0*$",x).start(0)];
                y = y[ : re.search("0*$",y).start(0)];
                if float(elements[2]) > float(max_elev):
                    continue;
                elif float(elements[2]) <= float(min_elev):
                    continue;
                xy = x + " " + y;
                # Only coordinates already seen in the ICESat pass are kept.
                if xy not in coords:
                    continue;
#                coords[xy] = "";
                coords[xy] = coords[xy] + xy + " " + elements[2].strip() + " " + elements[3].strip() + " " + elements[4].strip() + "\n";
        infile.close();
#    import math;
#    import subprocess;
#    x_ref, y_ref = xy.split();
#    infile = open(icesat_path, "r");
#    for line in infile:
#        if line.find("# @D") > -1:
#            elements = line.split("|");
#            date = elements[0];
#            x = elements[3];
#            y = elements[4];
#            h_ell = elements[5];
#            new_x = str(float(math.ceil((float(x) - float(x_ref)) / interval)) * interval + float(x_ref));
#            new_y = str(float(math.ceil((float(y) - float(y_ref)) / interval)) * interval + float(y_ref));
#            xy = new_x + " " + new_y;
#            year = date[4:8];
#            month = date[8:10];
#            day = date[10:12];
#            hour = "12";
#            minute = "00";
#            second = "00";
#            cmd = "\ndate +\"%s\" -d \"" + year + "-" + month + "-" + day + " " + hour + ":" + minute + ":" + second + "\"\n";
#            pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
#            secs = pipe.read().strip();
#            pipe.close();
#            cmd = "\ndate +\"%s\" -d \"" + year + "-01-01 00:00:00\"\n";
#            pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
#            year_secs = pipe.read().strip();
#            pipe.close();
#            date = str(float(year) + (float(secs) - float(year_secs)) / (24.0 * 60.0 * 60.0 * 365.25));
#            if xy not in coords:
#                coords[xy] = "";
#            coords[xy] = coords[xy] + xy + " " + h_ell + " " + date + " " + icesat_unc + "\n";
#    infile.close();
    # Emit each coordinate group followed by a '>' separator line.
    outfile = open(output_label + ".txt", "w");
    for xy in coords:
        outfile.write(coords[xy]);
        outfile.write(">\n");
    outfile.close();
    return;
if __name__ == "__main__":
    # CLI: aggregate_icesat.py <icesat_file> <dem_xyz_dir> <identifier> <output_label>
    import os;
    import sys;
    assert len(sys.argv) > 4, "\n***** ERROR: aggregate_icesat.py requires 4 arguments, " + str(len(sys.argv) - 1) + " given\n";
    assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
    assert os.path.exists(sys.argv[2]), "\n***** ERROR: " + sys.argv[2] + " does not exist\n";
    aggregate_icesat(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]);
    exit();
|
[
"wz278@cornell.edu"
] |
wz278@cornell.edu
|
cb82f5cc168695fdf732dccb1eb23dab8368ac8f
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_340/ch18_2020_03_24_20_35_15_928242.py
|
929b2954cd5a46a52474fe5ed86685430753dd03
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
def verifica_idade(idade):
    """Classify drinking-age clearance for the given age (Brazil: 18, USA: 21)."""
    # Guard-clause ordering from the highest threshold down.
    if idade >= 21:
        return "Liberado EUA e BRASIL"
    if idade >= 18:
        return "Liberado no BRASIL"
    return "Não está liberado"
|
[
"you@example.com"
] |
you@example.com
|
b87d7438f6510080b17e0f5f71038725daf202f8
|
3917a54838a67dafd6b17aa2cba51144f3f242d0
|
/demo/amrparsing/vw_pred_to_amr.py
|
7d0fff434814f3cfacb9ab73e8309d77623f372c
|
[] |
no_license
|
raosudha89/vowpal_wabbit
|
29e25533113a33a39b64ccacbbef5452e41590a8
|
03e973838e022149d802ec3f5e2817dcbc9019d5
|
refs/heads/master
| 2021-01-21T13:29:27.941872
| 2016-06-03T01:56:25
| 2016-06-03T01:56:25
| 43,918,094
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,055
|
py
|
import sys, os
import networkx as nx
import pdb
import pickle as p
NULL_CONCEPT = 1
def update_graph(amr_nx_graph, concept_nx_graph, nx_parent, parent, count):
    """Recursively copy `parent`'s subtree from concept_nx_graph into amr_nx_graph.

    New nodes get ids 'n<count>'. Nodes whose instance contains a double
    quote are flagged postprocess=True (later printed without a variable
    shortname). Returns the updated (amr_nx_graph, count).
    """
    for child in concept_nx_graph.successors(parent):
        child_idx = 'n'+str(count)
        count += 1
        if "\"" in concept_nx_graph.node[child]['instance']:
            amr_nx_graph.add_node(child_idx, instance=concept_nx_graph.node[child]['instance'], postprocess=True)
        else:
            amr_nx_graph.add_node(child_idx, instance=concept_nx_graph.node[child]['instance'], postprocess=False)
        # MultiDiGraph: [parent][child][0] picks the first parallel edge.
        amr_nx_graph.add_edge(nx_parent, child_idx, relation=concept_nx_graph[parent][child][0]['relation'])
        amr_nx_graph, count = update_graph(amr_nx_graph, concept_nx_graph, child_idx, child, count)
    return amr_nx_graph, count
def post_process(amr_nx_graph, concept_graph_fragment_dict):
    """Expand collapsed concepts in the predicted graph back into full AMR fragments.

    Three cases per node:
      1. concept known in concept_graph_fragment_dict -> splice in the
         stored graph fragment under this node;
      2. '<x>_name_<tok>...' -> expand into a :name node with :opN children;
      3. any other underscore-joined concept -> keep the first token and
         attach the rest as :op1 children.
    Returns the mutated graph.
    """
    all_nodes = amr_nx_graph.nodes()
    count = 1
    for node in all_nodes:
        node_concept = amr_nx_graph.node[node]['instance']
        if node_concept in concept_graph_fragment_dict.keys():
            concept_nx_graph = concept_graph_fragment_dict[node_concept]
            # Topological order puts the fragment root first.
            concept_nx_graph_root = nx.topological_sort(concept_nx_graph)[0]
            amr_nx_graph.node[node]['instance'] = concept_nx_graph.node[concept_nx_graph_root]['instance']
            amr_nx_graph.node[node]['postprocess'] = False
            amr_nx_graph, count = update_graph(amr_nx_graph, concept_nx_graph, node, concept_nx_graph_root, count)
        else:
            parts = node_concept.split('_')
            if len(parts) >= 3 and parts[1] == 'name':
                #new_reverse_map_dict[each_node] = x[0]
                amr_nx_graph.node[node]['instance'] = parts[0]
                name_node_idx = 'n'+str(count)
                count += 1
                amr_nx_graph.add_node(name_node_idx, instance='name', postprocess=False)
                amr_nx_graph.add_edge(node, name_node_idx, relation='name')
                subcount = 1
                for child in parts[2:]:
                    child_idx = 'n'+str(count)
                    count += 1
                    amr_nx_graph.add_node(child_idx, instance=child, postprocess=True)
                    amr_nx_graph.add_edge(name_node_idx, child_idx, relation='op'+str(subcount))
                    subcount += 1
            elif len(parts) > 1:
                amr_nx_graph.node[node]['instance'] = parts[0]
                for part in parts[1:]:
                    name_node_idx = 'n'+str(count)
                    count += 1
                    amr_nx_graph.add_node(name_node_idx, instance=part, postprocess=False)
                    amr_nx_graph.add_edge(node, name_node_idx, relation='op1') #TODO: get the k-best set of relation from preprocessing
            '''
            elif len(parts) == 4 and parts[0] == 'date-entity':
                amr_nx_graph.node[node]['instance'] = parts[0]
                if parts[1] != 'X':
                    child_idx = 'n'+str(count)
                    count += 1
                    amr_nx_graph.add_node(child_idx, instance=parts[1], postprocess=True)
                    amr_nx_graph.add_edge(node, child_idx, relation='year')
                if parts[2] != 'X':
                    child_idx = 'n'+str(count)
                    count += 1
                    amr_nx_graph.add_node(child_idx, instance=parts[2], postprocess=True)
                    amr_nx_graph.add_edge(node, child_idx, relation='month')
                if parts[3] != 'X':
                    child_idx = 'n'+str(count)
                    count += 1
                    amr_nx_graph.add_node(child_idx, instance=parts[3], postprocess=True)
                    amr_nx_graph.add_edge(node, child_idx, relation='date')
            '''
    return amr_nx_graph
def to_nx_graph(all_heads, all_tags, all_concepts, concepts_dict, relations_dict):
    """Build a MultiDiGraph from per-token head/tag/concept predictions.

    Index 0 is a virtual root: tokens whose head list contains 0 become AMR
    roots and get no incoming edge. NULL_CONCEPT tokens are skipped.
    Returns (graph, list_of_root_node_ids).
    """
    #print all_heads
    #print all_tags
    #print all_concepts
    amr_roots = []
    amr_nx_graph = nx.MultiDiGraph()
    for idx in range(1, len(all_concepts)):
        concept = all_concepts[idx]
        if concept == NULL_CONCEPT:
            continue
        amr_nx_graph.add_node(idx, instance=concepts_dict[concept], postprocess=False)
        if 0 in all_heads[idx]: #this is the root
            amr_roots.append(idx)
            continue #so don't add any edge
        for i, parent in enumerate(all_heads[idx]):
            amr_nx_graph.add_edge(parent, idx, relation=relations_dict[all_tags[idx][i]])
    return amr_nx_graph, amr_roots
shortname_dict = {}
def get_amr_string(root, amr_nx_graph, tab_levels=1):
    """Render the subtree at `root` in PENMAN-style notation.

    Re-entrant nodes (already in the global shortname_dict) are emitted as
    their bare shortname instead of being expanded again. Nodes flagged
    postprocess=True are printed without a '(cN / ...)' variable wrapper.
    """
    amr_string = ""
    #print amr_nx_graph.successors(root)
    global shortname_dict
    for child in amr_nx_graph.successors(root):
        if not child in shortname_dict.keys():
            # First visit: recurse, then assign the next shortname.
            size = len(shortname_dict.keys())
            child_amr_string = get_amr_string(child, amr_nx_graph, tab_levels+1)
            shortname_dict[child] = "c"+str(size)
            amr_string += "\t"*tab_levels + "\t:{0} ".format(amr_nx_graph[root][child][0]['relation']) + child_amr_string
        else:
            # Re-entrancy: reference the existing variable.
            amr_string += "\t"*tab_levels + ":{0} {1}\n".format(amr_nx_graph[root][child][0]['relation'], shortname_dict[child])
    if not root in shortname_dict.keys():
        size = len(shortname_dict.keys())
        shortname_dict[root] = "c"+str(size)
        if amr_nx_graph.node[root]['postprocess'] == True: #postprocessed node so don't add shortname
            amr_string = "{0} \n".format(amr_nx_graph.node[root]['instance'].replace("/", ""))
        else:
            amr_string = "({0} / {1}\n".format(shortname_dict[root], amr_nx_graph.node[root]['instance'].replace("/", "")) + amr_string + ")"
    else:
        amr_string = "{0}".format(amr_nx_graph.node[root]['instance'].replace("/", ""))
    return amr_string
def print_nx_graph(nx_graph, amr_roots, output_amr_file):
    """Serialize an AMR graph to PENMAN-style text and append it to output_amr_file.

    nx_graph        -- MultiDiGraph holding the predicted AMR
    amr_roots       -- node ids with no parent; multiple roots are joined
                       under an artificial 'multi-sentence' node
    output_amr_file -- open, writable file object

    BUG FIX: the body previously referenced the module-level global
    `amr_nx_graph` instead of the `nx_graph` parameter; it only worked
    because __main__ happened to pass that exact global in.
    """
    #pdb.set_trace()
    if not amr_roots:  # Only null concepts predicted: emit a bare multi-sentence node.
        nx_graph.add_node(0, instance='multi-sentence', parents=None, postprocess=False)
        output_amr_file.write(get_amr_string(0, nx_graph))
    elif len(amr_roots) > 1:  # Several roots: attach each as :sntN under one node.
        nx_graph.add_node(0, instance='multi-sentence', parents=None, postprocess=False)
        for i, amr_root in enumerate(amr_roots):
            nx_graph.add_edge(0, amr_root, relation='snt'+str(i+1))
        output_amr_file.write(get_amr_string(0, nx_graph))
    else:
        output_amr_file.write(get_amr_string(amr_roots[0], nx_graph))
    # Blank line separates consecutive AMRs in the output file.
    output_amr_file.write("\n")
    output_amr_file.write("\n")
if __name__ == "__main__":
    # CLI driver: reads VW prediction lines ('head:tag:concept[:head:tag...]'),
    # accumulates one sentence until a non-numeric/blank line, then converts
    # it to an AMR graph and writes it out. Python 2 script (print statement).
    if len(sys.argv) < 2:
        print "usage: vw_pred_to_amr.py <data.pred> <all_concepts.p> <all_relations.p> <concept_graph_fragment_dict.p> <output_amr_file>"
        sys.exit(0)
    vw_pred_file = open(sys.argv[1], 'r')
    concepts_dict = p.load(open(sys.argv[2], 'rb'))
    relations_dict = p.load(open(sys.argv[3], 'rb'))
    concept_graph_fragment_dict = p.load(open(sys.argv[4], 'rb'))
    output_amr_file = open(sys.argv[5], 'w')
    # Index 0 is the virtual root placeholder for each sentence.
    all_heads = [[0]]
    all_tags = [[0]]
    all_concepts = [0]
    global shortname_dict
    for line in vw_pred_file.readlines():
        line = line.strip("\n")
        values = line.split(':')
        if not values[0].isdigit() or not values[1].isdigit() or not line:
            # Sentence boundary: flush the accumulated predictions.
            if all_heads:
                amr_nx_graph, amr_roots = to_nx_graph(all_heads, all_tags, all_concepts, concepts_dict, relations_dict)
                amr_nx_graph = post_process(amr_nx_graph, concept_graph_fragment_dict)
                print_nx_graph(amr_nx_graph, amr_roots, output_amr_file)
                all_heads = [[0]]
                all_tags = [[0]]
                all_concepts = [0]
                shortname_dict = {}
                amr_root = []
        else:
            # Token line: first pair is head:tag, third field the concept,
            # optional extra head:tag pairs follow.
            values = [int(v.strip()) for v in values]
            heads = [values[0]]
            tags = [values[1]]
            concept = values[2]
            for i in range(3, len(values), 2):
                heads.append(values[i])
                tags.append(values[i+1])
            all_heads.append(heads)
            all_tags.append(tags)
            all_concepts.append(concept)
|
[
"raosudha@umiacs.umd.edu"
] |
raosudha@umiacs.umd.edu
|
959957ffa9f4cf7e4062352f73c90a7ff06c7e0d
|
1796043fc26c958b8fc45d9c058e382473c4f3af
|
/Fabio 01 Parte 02/f1_p2_q11_media_tresnumeros.py
|
91b7d9e889f37d6676a565d66ccb3e47c0964256
|
[] |
no_license
|
Lucakurotaki/ifpi-ads-algoritmos2020
|
a69adec27dbb10aceab1bc7038a0b56a760f99d1
|
34d5fedd5825a85404cf9340e42be618981679c1
|
refs/heads/master
| 2022-03-22T04:44:14.211359
| 2022-02-19T18:48:36
| 2022-02-19T18:48:36
| 246,585,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Input: read three integers
num1 = int(input("Digite o primeiro número: "))
num2 = int(input("Digite o segundo número: "))
num3 = int(input("Digite o terceiro número: "))
# Processing: arithmetic mean of the three values
media = (num1+num2+num3)/3
# Output: echo the numbers and their mean
print("A média aritmética dos números {}, {} e {} é: {}".format(num1,num2,num3,media))
|
[
"noreply@github.com"
] |
Lucakurotaki.noreply@github.com
|
e10541d0e1cc3cb3b76d033be5c42c2e03b341c9
|
64ada708c3ee39c624a223fa4881ce3689041606
|
/Appendix/maze_game.py
|
194f7ce6a1e7dfd28431a0db34a8a95eaf28369b
|
[] |
no_license
|
kimcaptin/PythonGame_1
|
1173cf3ac356d29b1cb254b1607bd4528e0a28cc
|
af32318bf1e6ea73aa00fc4c72d07e1a5d7c5300
|
refs/heads/main
| 2023-01-04T05:46:02.782910
| 2020-10-28T06:53:30
| 2020-10-28T06:53:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,421
|
py
|
import tkinter
import tkinter.messagebox
# Shared game state (tkinter is single-threaded, so plain globals suffice).
idx = 0    # game phase: 0 = init, 1 = playing, 2 = stage clear
tmr = 0    # frame counter for the stage-clear pause
stage = 1  # current stage number (1..5)
ix = 0     # pen position: maze column
iy = 0     # pen position: maze row
key = 0    # last key pressed (0 when released)
def key_down(e):
    # Remember the currently held key for the game loop.
    global key
    key = e.keysym
def key_up(e):
    # Key released: clear the movement state.
    global key
    key = 0
maze = [[], [], [], [], [], [], [], []]
def stage_data():
    # Load the maze layout and the pen's start cell (ix, iy) for the
    # current stage, then mark the start cell as painted.
    global ix, iy
    global maze # global declaration needed because the whole list is reassigned
    if stage == 1:
        ix = 1
        iy = 1
        # 0: path, 1: painted path, 9: wall
        maze = [
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
            [9, 0, 9, 0, 0, 0, 9, 0, 0, 9],
            [9, 0, 9, 0, 9, 0, 9, 0, 0, 9],
            [9, 0, 9, 0, 9, 0, 9, 0, 9, 9],
            [9, 0, 9, 0, 9, 0, 9, 0, 0, 9],
            [9, 0, 9, 0, 9, 0, 9, 9, 0, 9],
            [9, 0, 0, 0, 9, 0, 0, 0, 0, 9],
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
        ]
    if stage == 2:
        ix = 8
        iy = 6
        maze = [
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 0, 0, 0, 9, 0, 9],
            [9, 0, 0, 9, 9, 0, 0, 9, 0, 9],
            [9, 0, 0, 9, 9, 0, 0, 9, 0, 9],
            [9, 9, 9, 9, 9, 0, 0, 9, 0, 9],
            [9, 9, 9, 9, 9, 0, 0, 0, 0, 9],
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
        ]
    if stage == 3:
        ix = 3
        iy = 3
        maze = [
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
            [9, 9, 9, 0, 0, 0, 0, 9, 9, 9],
            [9, 9, 0, 0, 0, 0, 0, 0, 9, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 9, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 9, 9],
            [9, 9, 0, 0, 0, 0, 0, 9, 9, 9],
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
        ]
    if stage == 4:
        ix = 4
        iy = 3
        maze = [
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 9, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 9, 0, 0, 0, 9, 0, 9],
            [9, 0, 0, 0, 0, 0, 0, 9, 0, 0],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
        ]
    if stage == 5:
        ix = 1
        iy = 6
        maze = [
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 9, 0, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 0, 0, 9, 9, 0, 9],
            [9, 0, 0, 0, 0, 9, 9, 9, 0, 9],
            [9, 0, 0, 9, 0, 0, 0, 0, 0, 9],
            [9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
            [9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
        ]
    # Paint the starting cell.
    maze[iy][ix] = 1
def draw_bg():
    # Draw maze tiles (white for paths, wall image for walls), the stage
    # label, the pink start cell, and the pen image. Cells are 80px.
    for y in range(8):
        for x in range(10):
            gx = 80 * x
            gy = 80 * y
            if maze[y][x] == 0:
                cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="white", width=0, tag="BG")
            if maze[y][x] == 9:
                cvs.create_image(gx + 40, gy + 40, image=wall, tag="BG")
    cvs.create_text(120, 40, text="STAGE " + str(stage), fill="white", font=("Times New Roman", 30, "bold"), tag="BG")
    gx = 80 * ix
    gy = 80 * iy
    cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="pink", width=0, tag="BG")
    cvs.create_image(gx + 60, gy + 20, image=pen, tag="PEN")
def erase_bg():
    # Remove all maze and pen canvas items before redrawing a stage.
    cvs.delete("BG")
    cvs.delete("PEN")
def move_pen():
    # Move the pen one cell in the held arrow-key direction if the target
    # cell is an unpainted path (0); paint visited cells (2). G/Shift_L
    # offers a restart dialog for the current stage.
    global idx, tmr, ix, iy, key
    bx = ix
    by = iy
    if key == "Left" and maze[iy][ix - 1] == 0:
        ix = ix - 1
    if key == "Right" and maze[iy][ix + 1] == 0:
        ix = ix + 1
    if key == "Up" and maze[iy - 1][ix] == 0:
        iy = iy - 1
    if key == "Down" and maze[iy + 1][ix] == 0:
        iy = iy + 1
    if ix != bx or iy != by:
        # Mark the new cell painted and redraw the pen there.
        maze[iy][ix] = 2
        gx = 80 * ix
        gy = 80 * iy
        cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="pink", width=0, tag="BG")
        cvs.delete("PEN")
        cvs.create_image(gx + 60, gy + 20, image=pen, tag="PEN")
    if key == "g" or key == "G" or key == "Shift_L":
        key = 0
        # "Give up" dialog: restart the stage on Yes.
        ret = tkinter.messagebox.askyesno("포기", "다시 하겠습니까?")
        # root.focus_force() # for Mac
        if ret == True:
            stage_data()
            erase_bg()
            draw_bg()
def count_tile():
    """Return how many corridor cells (value 0) remain unpainted in the maze."""
    return sum(row.count(0) for row in maze)
def game_main():
    # Main loop, rescheduled via root.after every 200 ms. Phases:
    # 0 init -> 1 play (move + clear check) -> 2 stage-clear pause -> 1.
    global idx, tmr, stage
    if idx == 0: # initialization
        stage_data()
        draw_bg()
        idx = 1
    if idx == 1: # pen movement and clear check
        move_pen()
        if count_tile() == 0:
            txt = "STAGE CLEAR"
            if stage == 5:
                txt = "ALL STAGE CLEAR!"
            cvs.create_text(400, 320, text=txt, fill="white", font=("Times New Roman", 40, "bold"), tag="BG")
            idx = 2
            tmr = 0
    if idx == 2: # stage clear
        tmr = tmr + 1
        if tmr == 30:
            # ~6 seconds later, advance to the next stage (if any).
            if stage < 5:
                stage = stage + 1
                stage_data()
                erase_bg()
                draw_bg()
                idx = 1
    root.after(200, game_main)
# --- tkinter window setup and main loop ---
root = tkinter.Tk()
root.title("한 번에 미로 칠하기 게임")
root.resizable(False, False)
root.bind("<KeyPress>", key_down)
root.bind("<KeyRelease>", key_up)
cvs = tkinter.Canvas(root, width=800, height=640)
cvs.pack()
# Image assets must sit next to the script.
pen = tkinter.PhotoImage(file="pen.png")
wall = tkinter.PhotoImage(file="wall.png")
game_main()
root.mainloop()
|
[
"jeipubmanager@gmail.com"
] |
jeipubmanager@gmail.com
|
1a84f5a49bcf0eab3c43b3ba1db9666ebd94af29
|
59fbeea017110472a788218db3c6459e9130c7fe
|
/rotate-list/rotate-list.py
|
be8a63c32a60f946bd7d6f0d6a3529f0a007cea7
|
[] |
no_license
|
niufenjujuexianhua/Leetcode
|
82b55d9382bc9f63f4d9da9431194e20a4d299f1
|
542c99e038d21429853515f62af51a77deaa4d9c
|
refs/heads/master
| 2022-04-27T16:55:00.035969
| 2022-03-10T01:10:04
| 2022-03-10T01:10:04
| 79,742,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 834
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def rotateRight(self, head, k):
        """
        Rotate the list to the right by k places.

        :type head: ListNode
        :type k: int
        :rtype: ListNode

        BUG FIX: the original did `d = ListNode` (class, not instance) and
        then `d.next = head`, silently mutating the ListNode *class* via a
        class attribute. This version finds the tail while counting, closes
        the list into a ring, and breaks it k nodes from the end -- no
        dummy node needed.
        """
        if not head or not head.next:
            return head
        # One pass: find the length and the current tail.
        tail = head
        length = 1
        while tail.next:
            length += 1
            tail = tail.next
        # Rotating by a multiple of the length is a no-op.
        k = k % length
        if k == 0:
            return head
        # Close into a ring, then cut after (length - k) nodes.
        tail.next = head
        new_tail = head
        for _ in range(length - k - 1):
            new_tail = new_tail.next
        new_head = new_tail.next
        new_tail.next = None
        return new_head
|
[
"wutuo123@yeah.net"
] |
wutuo123@yeah.net
|
73585cbc1b80617c5e8e1b4b75573ae0b261b5a9
|
aaa22c7aa8d8c6fb2a9d489252d72387c914cfac
|
/orders/migrations/0001_initial.py
|
7091656e024adbcbb40069c9f3359ffdc35672db
|
[] |
no_license
|
MohamedHany2002/online-shop
|
dccd55fef192cb94b57a5eca126a85c38c71c0fa
|
e8db42c17ea6b1cb0b08e6ff0e2e367ce9a118be
|
refs/heads/master
| 2022-09-04T16:24:40.912664
| 2020-05-12T14:53:43
| 2020-05-12T14:53:43
| 263,360,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
# Generated by Django 3.0.2 on 2020-04-11 14:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the orders app: creates the Order table.
    initial = True

    dependencies = [
        # The cart table must exist before Order's ForeignKey to it.
        ('cart', '0009_cart_user'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_id', models.CharField(blank=True, max_length=120)),
                ('shipping_total', models.DecimalField(decimal_places=2, default=5.99, max_digits=10)),
                ('order_total', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
                ('status', models.CharField(choices=[('created', 'Created'), ('paid', 'Paid'), ('shipped', 'Shipped'), ('refunded', 'Refunded')], max_length=120)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.cart')),
            ],
        ),
    ]
|
[
"goldenhany94@gmail.com"
] |
goldenhany94@gmail.com
|
92616aa2a4472acc2e3614862941dd9f05ea1934
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/throughput/final1/job11.py
|
0e1c795e3445c10eb7015ace6a5a0516b3146e91
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700
| 2021-11-11T21:57:54
| 2021-11-11T21:57:54
| 198,494,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,614
|
py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# CLI: testcase name, resume flag, GPU index, and scheduler host node.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin the process to the requested GPU.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num

# Training parameters
batch_size = 64
args_lr = 0.008
args_model = 'vgg16'

epoch_begin_time = 0

# Job name is derived from the script filename (e.g. 'job11').
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final1/' + job_name + '*'

total_epochs = 8
starting_epoch = 0

# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)

if args.resume:
    # Checkpoint file name encodes the epoch to resume from:
    # <job_name>_<epoch>.h5
    save_file = glob.glob(save_files)[0]
#    epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])

data_augmentation = True
num_classes = 10

# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True

n = 3

# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'

# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

if args.resume:
    # Resuming: reload the checkpointed model; b_end/c_end bracket the
    # checkpoint-load phase for the scheduler's bookkeeping.
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    # Fresh start: VGG backbone + BN/Dense classification head.
    print('train from start')
    model = models.Sequential()

    if '16' in args_model:
        base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '19' in args_model:
        base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)

    #base_model.summary()

    #pdb.set_trace()

    model.add(base_model)
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])

    #model.summary()

print(model_type)

#pdb.set_trace()

current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint the model,
    notify the scheduler, and exit."""
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)

    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)

    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Checkpoint name encodes the epoch to resume from later.
    model.save('/scratch/li.baol/checkpoint_final1/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')

    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)

#################################################################################

logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')

first_epoch_start = 0

class PrintEpoch(keras.callbacks.Callback):
    # Keras callback that reports per-epoch timing and completion progress
    # to the scheduler via send_signal.
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            first_epoch_start = time.time()
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)

    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Progress is measured against half the nominal epoch budget.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[
"baolin.li1994@gmail.com"
] |
baolin.li1994@gmail.com
|
0787e5ef5b14556ad5998e2475bd8a2883204da7
|
08ddce92744c78432b69409d197ad1393ca685aa
|
/api/novel_list_paihangbang.py
|
48719b3f009c55127001016d87a147d695a2814d
|
[] |
no_license
|
baliguan163/PythonDemo
|
71255eb21850134b4b6afb2eeed948cc34326e7a
|
c4fe1b6ea36bec2c531244ef95c809e17b64b727
|
refs/heads/master
| 2021-01-02T08:13:18.809740
| 2019-05-19T16:28:16
| 2019-05-19T16:28:16
| 98,963,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,256
|
py
|
__author__ = 'IBM'
#coding:utf-8
import requests
import time
from bs4 import BeautifulSoup
#抓取网页的函数
def get_html(url):
    """Fetch *url* and return its body decoded as UTF-8.

    Returns the sentinel string " ERROR " on any request/HTTP failure so
    callers can keep iterating over many URLs instead of crashing.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    # Narrowed from a bare `except`, which also swallowed
    # KeyboardInterrupt/SystemExit. RequestException is the base class of
    # all requests errors, including the HTTPError from raise_for_status().
    except requests.RequestException:
        return " ERROR "
def get_content(url):
    '''
    Scrape every per-category novel ranking from the page at *url*,
    append "name + link" rows for each novel to novel_list.csv,
    and return the list of all novel URLs.
    '''
    url_list = []
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    # Because of the page layout, the "history" and "finished" rankings
    # live in a div with a different class than the other categories.
    category_list = soup.find_all('div', class_='index_toplist mright mbottom')
    # history and finished novels
    history_finished_list = soup.find_all('div', class_='index_toplist mbottom')
    # NOTE(review): the two loops below are identical except for the print
    # label ("1" vs "2") — a candidate for a shared helper.
    for cate in category_list:
        name = cate.find('div', class_='toptab').span.string
        with open('novel_list.csv', 'a+') as f:
            f.write("\n小说种类:{} \n".format(name))
        print('-------------------小说种类1:',name,'-------------------')
        # The overall ranking is located directly via its style attribute.
        general_list = cate.find(style='display: block;')
        # Every novel name is contained in an <li> tag.
        book_list = general_list.find_all('li')
        # Iterate over each novel's name and link.
        for book in book_list:
            link = 'http://www.qu.la/' + book.a['href']
            title = book.a['title']
            # Collect every novel URL in the returned list.
            url_list.append(link)
            # Append mode so the file is not truncated.
            with open('novel_list.csv', 'a') as f:
                f.write("小说名:{:<} \t 小说地址:{:<} \n".format(title, link))
            print('小说名:',title,' 小说地址:',link)
    for cate in history_finished_list:
        name = cate.find('div', class_='toptab').span.string
        with open('novel_list.csv', 'a') as f:
            f.write("\n小说种类:{} \n".format(name))
        print('-------------------小说种类2:',name,'-------------------')
        general_list = cate.find(style='display: block;')
        book_list = general_list.find_all('li')
        for book in book_list:
            link = 'http://www.qu.la/' + book.a['href']
            title = book.a['title']
            url_list.append(link)
            with open('novel_list.csv', 'a') as f:
                f.write("小说名:{:<} \t 小说地址:{:<} \n".format(title, link))
            print('小说名:',title,' 小说地址:',link)
    return url_list
def get_txt_url(url):
    '''
    Collect the URL of every chapter of one novel and create its output
    file novel/<title>.txt with a title header line.

    Returns a tuple (chapter_url_list, novel_title).
    '''
    url_list = []
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    # Chapter links are the <a> tags inside <dd> elements.
    lista = soup.find_all('dd')
    txt_name = soup.find('h1').text.strip()
    with open('novel/{}.txt'.format(txt_name), "a+", encoding='utf-8') as f:
        f.write('小说标题:{} \n'.format(txt_name))
    for url in lista:
        url_list.append('http://www.qu.la/' + url.a['href'])
    return url_list, txt_name
def get_one_txt(url, txt_name):
    '''
    Download one chapter from *url* and append its title and text to
    novel/<txt_name>.txt, stripping the site's 'chaptererror();' artifact
    and converting <br/> tags to newlines.
    '''
    #print('下载小说:',txt_name,' ',url)
    html = get_html(url).replace('<br/>', '\n')
    soup = BeautifulSoup(html, 'lxml')
    try:
        txt = soup.find('div', id='content').text.replace('chaptererror();', '')
        title = soup.find('title').text
        with open('novel/{}.txt'.format(txt_name), "a", encoding='utf-8') as f:
            f.write(title + '\n\n')
            f.write(txt)
        print('当前小说:{} 当前章节{} 已经下载完毕'.format(txt_name, title))
    # Narrowed from a bare `except`: AttributeError covers a missing
    # content/title element (find() returned None), OSError covers file
    # write failures. Anything else now propagates instead of being
    # silently swallowed. Also fixes the misspelled diagnostic message.
    except (AttributeError, OSError):
        print('something wrong')
# Entry point: scrape the ranking index, then download every chapter of
# every listed novel into the novel/ directory.
url = 'http://www.qu.la/paihangbang/'
if __name__ == "__main__":
    url_list = get_content(url)
    for url in url_list:
        # one_novel_url_list is (chapter_urls, novel_title).
        one_novel_url_list = get_txt_url(url)
        #print('one_novel_url_list:',one_novel_url_list)
        for url in one_novel_url_list[0]:
            get_one_txt(url,one_novel_url_list[1])
|
[
"baliguan163@163.com"
] |
baliguan163@163.com
|
17abdcd68276fa7e209abb00e5e0f0fd4af4c524
|
9680c27718346be69cf7695dba674e7a0ec662ca
|
/Numpy/Numpy Arange Function - Creating NumPy Arrays.py
|
d5629978efe1c2f1af3df9713de2c0e6dd231f95
|
[] |
no_license
|
Md-Monirul-Islam/Python-code
|
5a2cdbe7cd3dae94aa63298b5b0ef7e0e31cd298
|
df98f37dd9d21784a65c8bb0e46d47a646259110
|
refs/heads/main
| 2023-01-19T05:15:04.963904
| 2020-11-19T06:10:09
| 2020-11-19T06:10:09
| 314,145,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import numpy as np

# Demonstrate numpy.arange with various start/stop/step/dtype combinations.
#print(help(np.arange))
examples = (
    np.arange(1,10),                    # integers 1..9
    np.arange(3.),                      # float stop only: 0.0, 1.0, 2.0
    np.arange(1,20,2),                  # explicit step
    np.arange(20, dtype= "complex"),    # dtype given by name
    np.arange(1,10,2,dtype= "float"),
)
for example in examples:
    print(example)
|
[
"61861844+Md-Monirul-Islam@users.noreply.github.com"
] |
61861844+Md-Monirul-Islam@users.noreply.github.com
|
894fe800d424952f5cfadd3b5b2dc93ad384697c
|
72ab559d5ce5f02b5ba9b48fa5e51ec69eca34a7
|
/jaraco/net/http/cookies.py
|
3de1e785e8b34cfe29d4aa08ad1ca3fb324db1ee
|
[
"MIT"
] |
permissive
|
jaraco/jaraco.net
|
a961137b51314faa0c8dda04f71328ca587e9f36
|
d2218af49459d38e447c9d977e06f29452f88ff9
|
refs/heads/main
| 2023-08-31T16:49:35.366220
| 2023-08-06T23:44:43
| 2023-08-06T23:44:43
| 53,204,002
| 0
| 1
|
MIT
| 2022-12-06T02:52:01
| 2016-03-05T14:01:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,464
|
py
|
import pathlib
import collections
import http.cookiejar
import contextlib
import jsonpickle
class Shelf(collections.abc.MutableMapping):
    """
    Similar to Python's shelve.Shelf, implements a persistent
    dictionary using jsonpickle.

    >>> fn = getfixture('tmp_path') / 'shelf.json'
    >>> shelf = Shelf(fn)
    >>> shelf['foo'] = 'bar'
    >>> copy = Shelf(fn)
    >>> copy['foo']
    'bar'

    >>> shelf['bar'] = 'baz'
    >>> Shelf(fn)['bar']
    'baz'
    """

    def __init__(self, filename):
        self.filename = pathlib.Path(filename)
        self.store = dict()
        # A missing or unreadable backing file simply starts the shelf empty.
        with contextlib.suppress(Exception):
            self._load()

    def _load(self):
        # Replace the in-memory dict wholesale from the backing file.
        text = self.filename.read_text(encoding='utf-8')
        self.store = jsonpickle.decode(text)

    def _save(self):
        # Serialize the whole dict; called after every mutation.
        encoded = jsonpickle.encode(self.store)
        self.filename.write_text(encoded, encoding='utf-8')

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value
        self._save()

    def __delitem__(self, key):
        del self.store[key]
        self._save()

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
class ShelvedCookieJar(http.cookiejar.CookieJar):
    """
    Cookie jar backed by a shelf.

    Automatically persists cookies to disk.
    """

    def __init__(self, shelf: Shelf, **kwargs):
        super().__init__(**kwargs)
        # The shelf doubles as the CookieJar's internal _cookies mapping.
        self._cookies = self.shelf = shelf

    @classmethod
    def create(cls, root: pathlib.Path = pathlib.Path(), name='cookies.json', **kwargs):
        return cls(Shelf(root / name), **kwargs)

    def set_cookie(self, cookie):
        with self._cookies_lock:
            by_path = self.shelf.setdefault(cookie.domain, {})
            by_name = by_path.setdefault(cookie.path, {})
            by_name[cookie.name] = cookie
            # Mutating the nested dicts bypasses Shelf.__setitem__, so
            # persist explicitly.
            self.shelf._save()

    def clear(self, domain=None, path=None, name=None):
        super().clear(domain, path, name)
        # A (domain, path, name)-targeted clear deletes from a nested dict
        # without going through the shelf's mutators; persist explicitly.
        if path is not None or name is not None:
            self.shelf._save()

    def get(self, name, default=None):
        # Return the value of the first cookie with a matching name,
        # scanning domains, then paths, in shelf order.
        for by_path in self.shelf.values():
            for by_name in by_path.values():
                for cookie in by_name.values():
                    if cookie.name == name:
                        return cookie.value
        return default
|
[
"jaraco@jaraco.com"
] |
jaraco@jaraco.com
|
f09015ff8ac994c61b10c2fb321256ff3d7e0692
|
3c01d7928029e74a19d646f5a40b3bf099b281a7
|
/typeshed/stubs/freezegun/freezegun/api.pyi
|
df10e569ae1dd5611b057fda44e39fd4cd45791a
|
[
"MIT"
] |
permissive
|
arpancodes/protectsql
|
f3ced238c103fca72615902a9cb719c44ee2b5ba
|
6392bb7a86d1f62b86faf98943a302f7ea3fce4c
|
refs/heads/main
| 2023-08-07T16:33:57.496144
| 2021-09-24T19:44:51
| 2021-09-24T19:44:51
| 409,894,807
| 0
| 1
|
MIT
| 2021-09-24T19:44:52
| 2021-09-24T08:46:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,243
|
pyi
|
from collections.abc import Awaitable, Callable, Iterator, Sequence
from datetime import date, datetime, timedelta
from numbers import Real
from typing import Any, Type, TypeVar, Union, overload
_T = TypeVar("_T")
_Freezable = Union[str, datetime, date, timedelta]
class TickingDateTimeFactory(object):
def __init__(self, time_to_freeze: datetime, start: datetime) -> None: ...
def __call__(self) -> datetime: ...
class FrozenDateTimeFactory(object):
def __init__(self, time_to_freeze: datetime) -> None: ...
def __call__(self) -> datetime: ...
def tick(self, delta: float | Real | timedelta = ...) -> None: ...
def move_to(self, target_datetime: _Freezable | None) -> None: ...
class StepTickTimeFactory(object):
def __init__(self, time_to_freeze: datetime, step_width: float) -> None: ...
def __call__(self) -> datetime: ...
def tick(self, delta: timedelta | None = ...) -> None: ...
def update_step_width(self, step_width: float) -> None: ...
def move_to(self, target_datetime: _Freezable | None) -> None: ...
class _freeze_time:
def __init__(
self,
time_to_freeze_str: _Freezable | None,
tz_offset: float,
ignore: Sequence[str],
tick: bool,
as_arg: bool,
auto_tick_seconds: float,
) -> None: ...
@overload
def __call__(self, func: Type[_T]) -> Type[_T]: ...
@overload
def __call__(self, func: Callable[..., Awaitable[_T]]) -> Callable[..., Awaitable[_T]]: ...
@overload
def __call__(self, func: Callable[..., _T]) -> Callable[..., _T]: ...
def __enter__(self) -> Any: ...
def __exit__(self, *args: Any) -> None: ...
def start(self) -> Any: ...
def stop(self) -> None: ...
def decorate_class(self, klass: Type[_T]) -> _T: ...
def decorate_coroutine(self, coroutine: _T) -> _T: ...
def decorate_callable(self, func: Callable[..., _T]) -> Callable[..., _T]: ...
def freeze_time(
time_to_freeze: _Freezable | Callable[..., _Freezable] | Iterator[_Freezable] | None = ...,
tz_offset: float | None = ...,
ignore: Sequence[str] | None = ...,
tick: bool | None = ...,
as_arg: bool | None = ...,
auto_tick_seconds: float | None = ...,
) -> _freeze_time: ...
|
[
"arpanforbusiness@gmail.com"
] |
arpanforbusiness@gmail.com
|
ca192c64fa93c17fa7c3b701c680a16935c6d89e
|
a86bca3e88fc3012bc9805c74c2e752262370326
|
/AI/tab_text_dataset.py
|
d61515e1e5c05bde943591f38dac98c22c8167a7
|
[
"MIT"
] |
permissive
|
osamhack2021/AI_NoYoutube_60Duo
|
4921f7c838776305d8dc00d6ceb04b2190565916
|
c1e34b7b506b43c9be6c39da3211fac49bfbcd14
|
refs/heads/main
| 2023-08-11T19:24:45.560000
| 2021-10-13T15:00:38
| 2021-10-13T15:00:38
| 405,925,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
# pip install pypiwin32
import win32gui
import time
import csv

# Poll the Windows foreground window title once a second and append each
# newly focused browser tab title (browser suffix stripped) to a CSV file.
file_name = 'tab_text_dataset.csv'
browser_list = [' - Chrome', ' - Internet Explorer']
window = ''

while True:
    current_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
    if window != current_window:
        window = current_window
        print(window)
        for browser in browser_list:
            if browser in window:
                # Strip the browser suffix so only the page/tab title is logged.
                window = window.replace(browser,'')
                with open(file_name, 'a', newline='') as f:
                    wr = csv.writer(f, lineterminator='\n')
                    wr.writerow([window])
                    # NOTE(review): redundant — the `with` block already
                    # closes the file on exit.
                    f.close()
                # Restore the suffix so the change-detection comparison
                # above still matches the full window title.
                window = window + browser
    time.sleep(1)
|
[
"noreply@github.com"
] |
osamhack2021.noreply@github.com
|
348594c84f7e498712d4b049c30591da6b52c02f
|
2b912b088683e2d4d1fa51ebf61c4e53c5058847
|
/.PyCharmCE2017.1/system/python_stubs/-1247971765/nis.py
|
d99a03de2f9f3e21069295858335ebf44134f40a
|
[] |
no_license
|
ChiefKeith/pycharmprojects
|
1e1da8288d85a84a03678d2cae09df38ddb2f179
|
67ddcc81c289eebcfd0241d1435b28cd22a1b9e0
|
refs/heads/master
| 2021-07-13T00:52:19.415429
| 2017-10-08T23:04:39
| 2017-10-08T23:04:39
| 106,216,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
# encoding: utf-8
# module nis
# from /usr/lib/python3.4/lib-dynload/nis.cpython-34m-arm-linux-gnueabihf.so
# by generator 1.145
""" This module contains functions for accessing NIS maps. """
# no imports
# functions
def cat(map, domain=None): # real signature unknown; restored from __doc__
"""
cat(map, domain = defaultdomain)
Returns the entire map as a dictionary. Optionally domain can be
specified but it defaults to the system default domain.
"""
pass
def get_default_domain(): # real signature unknown; restored from __doc__
"""
get_default_domain() -> str
Corresponds to the C library yp_get_default_domain() call, returning
the default NIS domain.
"""
return ""
def maps(domain=None): # real signature unknown; restored from __doc__
"""
maps(domain = defaultdomain)
Returns an array of all available NIS maps within a domain. If domain
is not specified it defaults to the system default domain.
"""
pass
def match(key, map, domain=None): # real signature unknown; restored from __doc__
"""
match(key, map, domain = defaultdomain)
Corresponds to the C library yp_match() call, returning the value of
key in the given map. Optionally domain can be specified but it
defaults to the system default domain.
"""
pass
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
|
[
"kmarlin@dtcc.edu"
] |
kmarlin@dtcc.edu
|
48f0170bf6fbdde92cbc13bff5b74a79d5d3677b
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Quantization/trend_LinearTrend/cycle_5/ar_12/test_artificial_1024_Quantization_LinearTrend_5_12_0.py
|
b85ea2797b9c7c86df0eee3743a87b299676b7b0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 275
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
80ae8eed0fd27a1c218770252d327ce12836c056
|
b812afe2b6e810881f5b0c66e5fe49b88adcd816
|
/unsupervised_learning/0x03-hyperparameter_tuning/1-gp.py
|
818d56e43ff1c8d6189cc8a3ac4d605d7fa0d856
|
[] |
no_license
|
AhmedOmi/holbertonschool-machine_learning
|
6b44b1957b6cee291d6dabd19a5bbe535c83881f
|
f887cfd48bb44bc4ac440e27014c82390994f04d
|
refs/heads/master
| 2023-07-28T07:45:09.886422
| 2021-09-13T13:05:14
| 2021-09-13T13:05:14
| 317,320,504
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
#!/usr/bin/env python3
"""Predict mean and standard deviation of points in Gaussian Process"""
import numpy as np
class GaussianProcess:
    """Noise-free Gaussian process regressor with an RBF kernel."""

    def __init__(self, X_init, Y_init, l=1, sigma_f=1):
        # Training inputs/outputs and kernel hyper-parameters
        # (l: length scale, sigma_f: signal standard deviation).
        self.X = X_init
        self.Y = Y_init
        self.l = l
        self.sigma_f = sigma_f
        # Covariance matrix of the training inputs with themselves.
        self.K = self.kernel(X_init, X_init)

    def kernel(self, X1, X2):
        """Radial Basis Function (squared-exponential) covariance."""
        sq_dist = (X1 - X2.T) ** 2
        return self.sigma_f ** 2 * np.exp(-sq_dist / (2 * self.l ** 2))

    def predict(self, X_s):
        """Return the posterior mean and variance of the points in X_s."""
        K_star = self.kernel(X_s, self.X)
        K_inv = np.linalg.inv(self.K)
        mu = np.matmul(np.matmul(K_star, K_inv), self.Y)[:, 0]
        prior_cov = self.kernel(X_s, X_s)
        cov = prior_cov - np.matmul(np.matmul(K_star, K_inv), K_star.T)
        return mu, np.diagonal(cov)
|
[
"ahmedomarmiledi@gmail.com"
] |
ahmedomarmiledi@gmail.com
|
3943c96057143acaa2de8e328f572962c5b864dc
|
4007632edd395d243bca022418848a2ff54409c8
|
/123.py
|
739ac163e85499ece4b6557edddc23705945ca8a
|
[] |
no_license
|
549982170/python_learning
|
d80a9403cbe2eb8304aba50ff373b2b67df095e2
|
2c3f73718e0a6d9d4923a2e0f22ff2d4230357e9
|
refs/heads/master
| 2021-06-22T04:32:06.286691
| 2020-12-10T03:29:56
| 2020-12-10T03:29:56
| 101,596,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# Minimal Selenium demo: open Firefox, load a page, locate an element, close.
import time

from selenium import webdriver
# create a new Firefox session
driver = webdriver.Firefox()
#time.sleep(3)
#driver.maximize_window()

# navigate to the application home page
driver.get("http://moxian.com/")
# NOTE(review): the result of this lookup is discarded, so the call only
# exercises the locator; find_elements_by_xpath is removed in Selenium 4+.
driver.find_elements_by_xpath("/html/body/div[1]/div[1]/div/p/a[1]")
time.sleep(3)
driver.close()
|
[
"549982170@qq.com"
] |
549982170@qq.com
|
80a38c34283873686e582d5788cbeeadaf9a19d8
|
2e3d63726c1d05b73b9cc22e5bcbead30246a8dc
|
/facepad_app/migrations/0005_auto_20160321_2211.py
|
a3c0a96ba9d6a736b8075f2498310ca66564ec30
|
[] |
no_license
|
rolycg/tiny_social_network
|
041f6e4ab503bb82eca4cf1efb436d3b5250343a
|
e7ec45d053d291d53bd9d58bbb882b4b3edb6355
|
refs/heads/master
| 2021-01-10T04:27:16.344700
| 2016-03-23T18:19:49
| 2016-03-23T18:19:49
| 54,581,800
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-22 04:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facepad_app', '0004_auto_20160321_1853'),
]
operations = [
migrations.AlterField(
model_name='simpleuser',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='avatars/', verbose_name='Avatar'),
),
]
|
[
"rolycg89@gmail.com"
] |
rolycg89@gmail.com
|
b5c6b72e3cdb5fcf8c1a97044664e4ffdb313025
|
1e177ebdcb470f738c058606ac0f86a36085f661
|
/Pico/MicroPython/mqtt/oneWire01.py
|
023100ff032111861ebea42335dfc18ee346c6f9
|
[] |
no_license
|
robingreig/raspi-git
|
5cbdd295c1048a0571aa2c2f8576438269439f07
|
7373bf94557d7a88c8f343362ba64f9cd19c8ce7
|
refs/heads/master
| 2023-08-31T03:16:17.286700
| 2023-08-26T11:54:23
| 2023-08-26T11:54:23
| 16,873,881
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
import time
import network
import machine
from machine import Pin
from onewire import OneWire
from ds18x20 import DS18X20
import binascii

# MicroPython (Pico) demo: read DS18B20 temperatures on the 1-wire bus on
# GPIO 16 and print each sensor's ROM id in several representations.
ds = DS18X20(OneWire(Pin(16)))
roms = ds.scan()
# Hard-coded ROM id of one known sensor on the bus.
sensor = (0x28,0xff,0xa1,0x58,0x74,0x16,0x04,0x24)

while True:
    try:
        # convert temp in DS18B20
        ds.convert_temp()
        # have to wait at least 750mS after conver
        time.sleep_ms(1000)
        # read temp from the sensor
        temp1 = ds.read_temp(sensor)
        print('sensor temp1 = ',temp1)
        time.sleep(2)
        # format the value to 2 decimal places
        temp1 = "%3.2f" % temp1
        print('Formatted temp1 = ',temp1)
        time.sleep(2)
        # roms is ds.scan()
        for rom in roms:
            print('rom = ',rom)
            # convert from bytearray to bytes
            str1 = bytes(rom)
            print('Type of str1 = ',(type(str1)))
            print('str1 = ',str1)
            # convert from bytes to hex string
            str2 = binascii.hexlify(rom)
            print('Type of str2 = ',(type(str2)))
            print('str2 = ',str2)
            # remove the b'
            str3 = str2.decode()
            print('Type of str3 = ',(type(str3)))
            print('str3 = ',str3)
            # Read the temp from the sensor
            temp2 = (ds.read_temp(rom))
            print('temp2 = ',temp2)
            temp2 = "%3.2f" % temp2
            print('Formatted temp2 = ',temp2)
            time.sleep(2)
        pass
    except:
        # NOTE(review): this bare except doubles as the Ctrl-C exit path;
        # any sensor error also ends the loop with the same message.
        print('Jumped out of Try loop')
        break
|
[
"robin.greig@calalta.com"
] |
robin.greig@calalta.com
|
195be52232edb0af4b24300004a91908e4f653e4
|
c291ba4506a8998df8d7f384c911f6a0a1294001
|
/bai__83+84/BaiTapRenLuyenXuLyList.py
|
6cf3194271e88b97837cd3eb94d1f5f96c1eff96
|
[] |
no_license
|
thanh-falis/Python
|
f70804ea4a3c127dcb7738d4e7c6ddb4c5a0a9d4
|
fa9f98d18e0de66caade7c355aa6084f2d61aab3
|
refs/heads/main
| 2023-08-18T17:34:29.851365
| 2021-09-30T12:09:30
| 2021-09-30T12:09:30
| 398,952,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
"""
Viết chương trình cho phép:
- Khởi tạo list
- Thêm phần tử vào list
- Nhập K, kiểm tra K xuât hiện bao nhiêu lần trong list
- Tính tổng các số nguyên tố trong list
- Sắp xếp
- Xóa list
"""
from random import randrange
print("Chương trình xử lý list")
n = int(input("Nhập só phần tử: "))
lst = [0] * n
for i in range(n):
lst[i] = randrange(-100, 100)
print("List đa chiều ngẫu nhiên:")
print(lst)
print("Mời bạn nhập thêm số mới:")
value = int(input())
lst.append(value)
print(lst)
print("Bạn muốn đếm số nào")
k = int(input())
cnt = lst.count(k)
print(k, "Xuất hiện trong list: ", cnt, "lần")
def CheckPrime(n):
    """Return True if n is a prime number, False otherwise.

    Replaces the original O(n) count-all-divisors loop with trial
    division up to sqrt(n); the truth table is unchanged (n < 2,
    including negatives, is not prime).
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
# Count and sum the prime numbers in the list.
cnt = 0
snt = 0
for j in lst:
    if CheckPrime(j):
        cnt += 1
        snt += j
print("Có ", cnt, "số nguyên tố trong list")
print("Tổng =", snt)

lst.sort()
print("List sau khi sort")
print(lst)

# BUG FIX: the original did `del lst` and then printed `lst`, which raises
# NameError. Clear the list in place so the final print shows [] as intended.
lst.clear()
print("List sau khi xóa")
print(lst)
|
[
"thanhelma2020|@gmail.com"
] |
thanhelma2020|@gmail.com
|
2ff4e36d146af072dbdcbaa1de46ca96971cfa6e
|
c4ecc70400f3c4375dd4b2335673137dd36b72b4
|
/aggregator.py
|
da003c58f45e82acd48d5a01021b9f07bfba9137
|
[
"MIT"
] |
permissive
|
TippyFlitsUK/FarmXero
|
1bb3496d164d66c940bd3012e36e1763990ff30d
|
881b1e6648e927631b276e66a4c5287e4de2cbc1
|
refs/heads/main
| 2023-07-05T14:49:57.186130
| 2021-08-19T19:33:48
| 2021-08-19T19:33:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,668
|
py
|
# This aggregates the Table data from the scrapers and creates journals
# Also writes data to a file for accurate record keeping.
import json
import datetime
import time
import os
import sys
import argparse
import FilfoxScraper
import Addresses
import coingeckoScraper
import xeroAccounts as xa
import data_folders
try:
from xero_python.accounting import ManualJournal, ManualJournalLine
except:
print('you need to activate the environment run the following:')
print('source venv/bin/activate')
# from xero_python.accounting import AccountingApi, ManualJournal, ManualJournalLine
def nanoFilToFil(nanoFil):
    """Convert a raw on-chain integer amount to FIL.

    NOTE(review): the scale factor is 10**-18 (i.e. attoFIL), although
    the parameter name says nanoFIL — confirm the intended unit.
    """
    scale = 10 ** -18
    return nanoFil * scale
def getJournalForDay(day, printJnl=True, archive=data_folders.JOURNAL_ARCHIVE):
    """Build a Xero ManualJournal for one day of Filfox miner activity.

    Scrapes messages and won blocks for [day, day+1), archives them as CSV,
    totals the FIL flows, converts to NZD at the day's rate, and returns a
    ManualJournal (not posted). Pass archive='none' to skip the CSV archive.
    """
    walletAddress = Addresses.minerAddress
    startDate = day
    endDate = day + datetime.timedelta(days=1)

    # Generate the miner wallet table
    table = FilfoxScraper.getMessageTableForDateRange(startDate, endDate, walletAddress)
    # Append transactions from the other wallets
    for w in Addresses.wallets:
        wTable = FilfoxScraper.getMessageTableForDateRange(startDate, endDate, w)
        table += wTable
    msgFn = data_folders.MESSAGE_ARCHIVE + 'msgs_' + startDate.strftime('%Y-%m-%d') + '.csv'
    FilfoxScraper.writeTableToCSV(msgFn, table)

    blocksWon = FilfoxScraper.getBlocksTableForDateRange(startDate, endDate, walletAddress)
    blockFn = data_folders.BLOCKS_ARCHIVE + 'blocks_' + startDate.strftime('%Y-%m-%d') + '.csv'
    FilfoxScraper.writeBlockTableToCSV(blockFn, blocksWon)

    # Totals are accumulated in raw on-chain units (see nanoFilToFil).
    transfers = 0
    collat = 0
    minerFee = 0
    burnFee = 0
    slash = 0
    numTransactions = 0
    blockRewards = 0
    numBlocksWon = 0

    for r in table:
        transfers = transfers + r['transfer']
        collat = collat + r['collateral']
        minerFee = minerFee + r['miner-fee']
        burnFee = burnFee + r['burn-fee']
        slash = slash + r['slash']
        numTransactions = numTransactions + 1

    for b in blocksWon:
        blockRewards = blockRewards + int(b['win'])
        numBlocksWon = numBlocksWon + 1

    # Human-readable FIL amounts used as journal-line descriptions.
    nCollat = "Collat: " + str(nanoFilToFil(collat)) + " FIL"
    nMinerFee = "Miner Fee: " + str(nanoFilToFil(minerFee)) + " FIL"
    nBurnFee = "Burn Fee: " + str(nanoFilToFil(burnFee)) + " FIL"
    nSlash = "Slash: " + str(nanoFilToFil(slash)) + " FIL"
    nTransfers = "Transfers: " + str(nanoFilToFil(transfers)) + " FIL"
    nBlockRewards = "Block Rewards: " + str(nanoFilToFil(blockRewards)) + " FIL (" + str(numBlocksWon)+") blocks won"
    nMinerBalance = "Miner Balance: " #+ str(nanoFilToFil(minerBalance)) + "FIL"

    # NZD amounts; sign convention: debits positive, credits negative.
    exchRate = coingeckoScraper.getFilecoinNZDPriceOnDay(day)
    collatNzd = round(nanoFilToFil(collat) * exchRate, 2)
    minerFeeNzd = round(nanoFilToFil(minerFee) * exchRate, 2)
    burnFeeNzd = round(nanoFilToFil(burnFee) * exchRate, 2)
    slashNzd = round(nanoFilToFil(slash) * exchRate, 2)
    transfersNzd = -round(nanoFilToFil(transfers) * exchRate, 2)#positive transfers (into miner) come from credits therefore -ve
    blockRewardsNzd = -round(nanoFilToFil(blockRewards) * exchRate, 2)#Rewards are credits therefore are -ve
    # Balancing line so the journal's debits and credits sum to zero.
    minerBalanceNzd = -(transfersNzd + collatNzd + minerFeeNzd + burnFeeNzd + slashNzd + blockRewardsNzd)

    jnlNarration = 'Filfox data for the day ' + startDate.strftime('%d-%m-%Y') #+ ' to ' + endDate.strftime('%d-%m-%Y')
    jnlLinesAll = [
        ManualJournalLine(line_amount=collatNzd, account_code=xa.COLLAT, description=nCollat),
        ManualJournalLine(line_amount=minerFeeNzd, account_code=xa.MINER_FEE, description=nMinerFee),
        ManualJournalLine(line_amount=burnFeeNzd, account_code=xa.BURN_FEE, description=nBurnFee),
        ManualJournalLine(line_amount=slashNzd, account_code=xa.SLASH, description=nSlash),
        ManualJournalLine(line_amount=transfersNzd, account_code=xa.TRANSFERS, description=nTransfers),
        ManualJournalLine(line_amount=blockRewardsNzd, account_code=xa.BLOCK_REWARDS, description=nBlockRewards),
        ManualJournalLine(line_amount=minerBalanceNzd, account_code=xa.MINER_BALANCE, description=nMinerBalance)
    ]
    # Xero rejects zero-value lines, so keep only lines of at least 1 cent.
    jnlLines = []
    for l in jnlLinesAll:
        if(abs(l.line_amount) >= 0.01):
            jnlLines.append(l)
    mj = ManualJournal(narration=jnlNarration, journal_lines=jnlLines, date=startDate)

    if(archive != 'none'):
        ARCHIVE_HEADER = 'date, narration, \
collat, Miner Fee, Burn Fee, Slash, Transfers, Block rewards, \
Blocks won, exch rate, \
NZD collat, NZD Miner Fee, NZD Burn Fee, NZD Slash, NZD Transfers, NZD Block rewards, NZD Balance\n'
        # Write the header only when creating a fresh archive file.
        if(os.path.exists(archive) == False):
            with open(archive, 'w+') as f:
                f.write(ARCHIVE_HEADER)
        csvLine = startDate.strftime('%d-%m-%Y')+','+str(jnlNarration)+','+\
            str(collat)+','+str(minerFee)+','+str(burnFee)+','+str(slash)+','+str(transfers)+','+str(blockRewards)+','+\
            str(numBlocksWon)+','+str(exchRate)+','+\
            str(collatNzd)+','+str(minerFeeNzd)+','+str(burnFeeNzd)+','+str(slashNzd)+','+str(transfersNzd)+','+str(blockRewardsNzd)+','+str(minerBalanceNzd)+'\n'
        with open(archive, 'a') as f:
            f.write(csvLine)

    if(printJnl):
        print(jnlNarration)
        print('Dr collat (601)' + str(collatNzd)) # collat is represented within miner balance
        print('Dr miner fee (311)' + str(minerFeeNzd))
        print('Dr burn fee (312)' + str(burnFeeNzd))
        print('Dr slash (319)' + str(slashNzd))
        print('Dr/cr transfers (990)' + str(transfersNzd)) #These are transferred out of info.farm accounts for now
        print(' Cr block rewards (200)' + str(blockRewardsNzd))
        print(' Cr minerbalance (601) ' + str(minerBalanceNzd))
        print('values in NZD')
        print('blocks won: ' + str(numBlocksWon))

    return mj
if __name__ == '__main__':
    # Stand-alone run: build (but do not post) the journal for one day.
    #print('you ran the aggregator stand alone: warning no journals posted to Xero')
    p = argparse.ArgumentParser(description='Python Aggregator')
    p.add_argument('-d', '--day', help='Day you want in format yyyy-mm-dd', required=True)
    # NOTE(review): when -p is supplied on the command line its value is a
    # string, and any non-empty string is truthy — confirm intended use.
    p.add_argument('-p', '--print', help='Print the journal to std out', required=False, default=True)
    p.add_argument('-a', '--archive',
                   help='Path for CSV output (default '+data_folders.JOURNAL_ARCHIVE+') or "none" for no archive',
                   required=False, default=data_folders.JOURNAL_ARCHIVE)
    args = p.parse_args()
    day = datetime.datetime.strptime(args.day, "%Y-%m-%d")
    getJournalForDay(day, args.print, args.archive)
    # getJournalForDay(datetime.date(2020,11,1))
|
[
"ben.norquay@gmail.com"
] |
ben.norquay@gmail.com
|
c53abe85917f5c583e0e2c182c85a9f49ef08c4f
|
e2062cd61cccc19cb71282278b4df47cd18dfc67
|
/protected/api/urls.py
|
e7949075aa3f8e20b7fe02d14784f12461e41db6
|
[] |
no_license
|
Ryanb58/cas-app
|
4dde3be9fc8b96a599c2c030d0055a53ec34d76a
|
703cd3e3a460429fab9f77ea09a7cfcae741fead
|
refs/heads/master
| 2020-03-08T06:14:05.973338
| 2018-04-23T15:51:13
| 2018-04-23T15:51:13
| 127,966,350
| 0
| 0
| null | 2018-04-24T19:47:55
| 2018-04-03T20:40:49
|
Python
|
UTF-8
|
Python
| false
| false
| 128
|
py
|
"""
"""
from django.conf.urls import url, include
from api.views import Me
urlpatterns = [
url('^me/$', Me.as_view()),
]
|
[
"btimby@gmail.com"
] |
btimby@gmail.com
|
b5d479d41474d8731c2cfd3f260974e30e31840c
|
51d602577affebc8d91ffe234f926469d389dc75
|
/lis/specimen/lab_aliquot_list/admin/main.py
|
9476608e28ed9df6cca8fc8f6b9db9afd9ab3633
|
[] |
no_license
|
botswana-harvard/lis
|
5ac491373f74eaf3855f173580b000539d7f4740
|
48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7
|
refs/heads/master
| 2020-12-29T01:31:07.821681
| 2018-06-24T06:06:57
| 2018-06-24T06:06:57
| 35,820,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
from django.contrib import admin
from ..models import AliquotCondition, AliquotType
class AliquotTypeAdmin(admin.ModelAdmin):
    """Django admin configuration for AliquotType."""
    # Columns shown on the changelist page.
    list_display = ('alpha_code', 'numeric_code', 'name', 'created', 'modified')
    ordering = ['name']
admin.site.register(AliquotType, AliquotTypeAdmin)
class AliquotConditionAdmin(admin.ModelAdmin):
    """Django admin configuration for AliquotCondition."""
    # Columns shown on the changelist page.
    list_display = ('display_index', 'name', 'short_name', 'field_name', 'created', 'modified')
    ordering = ['display_index']
admin.site.register(AliquotCondition, AliquotConditionAdmin)
|
[
"ckgathi@gmail.com"
] |
ckgathi@gmail.com
|
9e89e57942498b3484116d12eb5f43f7357868ed
|
748e074552291b5aacacce30f53c8c55302a7629
|
/src/fava/core/watcher.py
|
5bfb2fd3312641ec413142768279b6f282098444
|
[
"MIT"
] |
permissive
|
dallaslu/fava
|
fb5d2eeb53f8f58a40fa80f7111a255b1aaf1a7f
|
e96b784d960c9981bb566b595b2edb543b63b9a0
|
refs/heads/main
| 2023-08-23T19:29:52.357402
| 2021-10-16T02:35:21
| 2021-10-16T02:35:21
| 417,691,770
| 0
| 0
|
MIT
| 2021-10-16T02:22:01
| 2021-10-16T02:22:00
| null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
"""A simple file and folder watcher."""
import os
from typing import Iterable
from typing import List
class Watcher:
    """A simple file and folder watcher.

    For folders, only checks mtime of the folder and all subdirectories.
    So a file change won't be noticed, but only new/deleted files.
    """

    __slots__ = ["_files", "_folders", "_last_checked"]

    def __init__(self) -> None:
        self._files: List[str] = []
        self._folders: List[str] = []
        self._last_checked = 0

    def update(self, files: Iterable[str], folders: Iterable[str]) -> None:
        """Update the folders/files to watch.

        Args:
            files: A list of file paths.
            folders: A list of paths to folders.
        """
        self._files = list(files)
        self._folders = list(folders)
        # Prime the baseline mtime so the next check() only reports
        # changes that happen after this update.
        self.check()

    def check(self) -> bool:
        """Check for changes.

        Returns:
            `True` if there was a file change in one of the files or folders,
            `False` otherwise.
        """
        newest = 0
        for filename in self._files:
            try:
                newest = max(newest, os.stat(filename).st_mtime_ns)
            except FileNotFoundError:
                # A watched file disappearing always counts as a change.
                return True
        for folder in self._folders:
            for current_dir, _dirnames, _filenames in os.walk(folder):
                newest = max(newest, os.stat(current_dir).st_mtime_ns)
        has_changed = newest != self._last_checked
        self._last_checked = newest
        return has_changed
|
[
"mail@jakobschnitzer.de"
] |
mail@jakobschnitzer.de
|
fd9772a2e0b4d8536ec7184cd8ddcbf7aaf8502e
|
d60f686fbc9287c1fb30defa17f731542c49ffb1
|
/mitmproxy/tools/web/webaddons.py
|
6b52188c2b6c383f9d5a5c8823cb7643e51530af
|
[
"MIT"
] |
permissive
|
tinycarrot/mitmproxy
|
f49b71fb8b15f523a3d9f9732f721b1b1dadc2d5
|
db32d0522c2cc89e13af083372dbb3ba50a5d27f
|
refs/heads/master
| 2020-06-23T00:59:14.425231
| 2019-11-29T15:49:24
| 2019-11-29T15:49:24
| 198,452,537
| 2
| 1
|
MIT
| 2019-07-23T14:57:17
| 2019-07-23T14:57:16
| null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
class WebAddon:
    """Registers the mitmweb configuration options with the option loader."""

    def load(self, loader):
        # (name, type, default, help) for every mitmweb option.
        specs = [
            ("web_open_browser", bool, True, "Start a browser."),
            ("web_debug", bool, False, "Enable mitmweb debugging."),
            ("web_port", int, 8081, "Web UI port."),
            ("web_iface", str, "127.0.0.1", "Web UI interface."),
        ]
        for name, typespec, default, helptext in specs:
            loader.add_option(name, typespec, default, helptext)
|
[
"git@maximilianhils.com"
] |
git@maximilianhils.com
|
bfb0da6d46807c7ad21b0e0d2e50682075561a3f
|
07bd6d166bfe69f62559d51476ac724c380f932b
|
/devel/lib/python2.7/dist-packages/webots_ros/msg/_StringStamped.py
|
4ab6e7193960537058f1be891960c38d08d48a87
|
[] |
no_license
|
Dangko/webots_differential_car
|
0efa45e1d729a14839e6e318da64c7f8398edd17
|
188fe93c2fb8d2e681b617df78b93dcdf52e09a9
|
refs/heads/master
| 2023-06-02T16:40:58.472884
| 2021-06-14T09:19:58
| 2021-06-14T09:19:58
| 376,771,194
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,773
|
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from webots_ros/StringStamped.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class StringStamped(genpy.Message):
_md5sum = "c99a9440709e4d4a9716d55b8270d5e7"
_type = "webots_ros/StringStamped"
_has_header = True # flag to mark the presence of a Header object
_full_text = """Header header
string data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(StringStamped, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = ''
else:
self.header = std_msgs.msg.Header()
self.data = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8', 'rosmsg')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8', 'rosmsg')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
|
[
"1477055603@qq.com"
] |
1477055603@qq.com
|
1feb58bb4dc7ff07d48036a6486d2d0671724a17
|
8eab8ab725c2132bb8d090cdb2d23a5f71945249
|
/virt/Lib/site-packages/numpy/tests/test_reloading.py
|
8d8c8aa34be8cc90d783139224604c4f9c6f955f
|
[
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-or-later",
"GCC-exception-3.1",
"BSD-3-Clause",
"MIT"
] |
permissive
|
JoaoSevergnini/metalpy
|
6c88a413a82bc25edd9308b8490a76fae8dd76ca
|
c2d0098a309b6ce8c756ff840bfb53fb291747b6
|
refs/heads/main
| 2023-04-18T17:25:26.474485
| 2022-09-18T20:44:45
| 2022-09-18T20:44:45
| 474,773,752
| 3
| 1
|
MIT
| 2022-11-03T20:07:50
| 2022-03-27T22:21:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
from numpy.testing import assert_raises, assert_warns, assert_, assert_equal
from numpy.compat import pickle
import sys
import subprocess
import textwrap
from importlib import reload
def test_numpy_reloading():
# gh-7844. Also check that relevant globals retain their identity.
import numpy as np
import numpy._globals
_NoValue = np._NoValue
VisibleDeprecationWarning = np.VisibleDeprecationWarning
ModuleDeprecationWarning = np.ModuleDeprecationWarning
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
assert_raises(RuntimeError, reload, numpy._globals)
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
def test_novalue():
import numpy as np
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(repr(np._NoValue), '<no value>')
assert_(pickle.loads(pickle.dumps(np._NoValue,
protocol=proto)) is np._NoValue)
def test_full_reimport():
"""At the time of writing this, it is *not* truly supported, but
apparently enough users rely on it, for it to be an annoying change
when it started failing previously.
"""
# Test within a new process, to ensure that we do not mess with the
# global state during the test run (could lead to cryptic test failures).
# This is generally unsafe, especially, since we also reload the C-modules.
code = textwrap.dedent(r"""
import sys
from pytest import warns
import numpy as np
for k in list(sys.modules.keys()):
if "numpy" in k:
del sys.modules[k]
with warns(UserWarning):
import numpy as np
""")
p = subprocess.run([sys.executable, '-c', code], capture_output=True)
if p.returncode:
raise AssertionError(
f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
)
|
[
"joao.a.severgnini@gmail.com"
] |
joao.a.severgnini@gmail.com
|
287c9bb18313fe4a10e9970681fa5be809d31ad2
|
109a2b213d0c2e4798aa419d47682e2c28ab98f4
|
/archimedean_spiral.py
|
bd12b17761b26bda454b80ed3b93505a05f32bb8
|
[] |
no_license
|
browlm13/cartesian_coordinates_to_single_number
|
d165d0a3638e7177d9b839de5da7df721bc18ad0
|
779abe0c960dab0ec045c6fa08d2b2930b079c16
|
refs/heads/master
| 2021-01-24T11:20:34.084792
| 2018-02-27T05:47:57
| 2018-02-27T05:47:57
| 123,077,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
#!/usr/bin/env python
import math
#import numpy as np
import sys
"""
cartesean coordinates, time parameter conversion using Archimedean Spiral
"""
def archimedean_spiral_to_cartesean(t, a=sys.float_info.min): pass
def archimedean_spiral_from_cartesean(cartesean_pair, a=sys.float_info.min): pass
|
[
"noreply@github.com"
] |
browlm13.noreply@github.com
|
e035bce4e99565c49e21eb92c6219a75cf255440
|
bff5db2a3d7d9b698fbf7512de6ddb87b1f5d45b
|
/python/frequency.py
|
8b67b7a6b5ab4a61d87f414960e1cb017ef74393
|
[] |
no_license
|
aslamup/huffman-coding
|
38e107d1439369e914b4cf5966cf2dfbaa89e300
|
73d90bffd3d5f05df5df55cf2e9e4abcd8826e24
|
refs/heads/master
| 2020-06-04T20:52:21.243970
| 2014-10-19T19:31:09
| 2014-10-19T19:31:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
def frequency(str):
freqs = {}
for ch in str:
freqs[ch] = freqs.get(ch,0) + 1
return freqs
print frequency('aaabccdeeeeeffg')
|
[
"aslamup0042@gmail.com"
] |
aslamup0042@gmail.com
|
10df12ad6bb711a63239c8ec9f4619c7ab6e695b
|
5e726f41a95e1fc79ed98b777ec85a386f7c7a13
|
/Model/SqlAlchemy/Weixin/WeixinModel.py
|
f5802fdf5fc3961cf0a68f3fb26063a215bc973f
|
[] |
permissive
|
825477418/XX
|
a3b43ff2061f2ec7e148671db26722e1e6c27195
|
bf46e34749394002eec0fdc65e34c339ce022cab
|
refs/heads/master
| 2022-08-02T23:51:31.009837
| 2020-06-03T13:54:09
| 2020-06-03T13:54:09
| 262,987,137
| 0
| 0
|
MIT
| 2020-06-03T13:54:10
| 2020-05-11T08:43:30
| null |
UTF-8
|
Python
| false
| false
| 2,004
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/3 10:25
# @Email : billsteve@126.com
# @Des :
# @File : WeixinModel
# @Software: PyCharm
from sqlalchemy import Column, Date, String
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
import XX.Model.SqlAlchemy.BaseModel as BaseModel
Base = declarative_base()
metadata = Base.metadata
class WeixinModel(Base, BaseModel.BaseModel):
__tablename__ = 'weixin'
id = Column(INTEGER(11), primary_key=True)
wx_id = Column(INTEGER(11))
biz = Column(String(32), unique=True)
name = Column(String(255))
gh_id = Column(INTEGER(11))
weixin_id = Column(INTEGER(11))
head_img = Column(String(255))
head_img_circle = Column(String(255))
intro = Column(String(255))
no1 = Column(INTEGER(11))
no2 = Column(String(255))
no3 = Column(INTEGER(11))
no4 = Column(INTEGER(11))
no5 = Column(INTEGER(11))
is_del = Column(TINYINT(1))
update_Ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.biz = kw.get("biz", None)
self.create_ts = kw.get("create_ts", None)
self.gh_id = kw.get("gh_id", None)
self.head_img = kw.get("head_img", None)
self.head_img_circle = kw.get("head_img_circle", None)
self.id = kw.get("id", None)
self.intro = kw.get("intro", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.no1 = kw.get("no1", None)
self.no2 = kw.get("no2", None)
self.no3 = kw.get("no3", None)
self.no4 = kw.get("no4", None)
self.no5 = kw.get("no5", None)
self.update_Ts = kw.get("update_Ts", None)
self.weixin_id = kw.get("weixin_id", None)
self.wx_id = kw.get("wx_id", None)
if __name__ == '__main__':
BaseModel.createInitFunction(WeixinModel)
|
[
"billsteve@126.com"
] |
billsteve@126.com
|
e86db6ca7ee45232db2129e0dbeb2cd18798e760
|
c6053ad14e9a9161128ab43ced5604d801ba616d
|
/Lemon/Python_Base/Lesson5_20181105/homework3.py
|
397cb31981d6b19af4d231ec6df519c528727998
|
[] |
no_license
|
HesterXu/Home
|
0f6bdace39f15e8be26031f88248f2febf33954d
|
ef8fa0becb687b7b6f73a7167bdde562b8c539be
|
refs/heads/master
| 2020-04-04T00:56:35.183580
| 2018-12-25T02:48:51
| 2018-12-25T02:49:05
| 155,662,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/6 11:12
# @Author : Hester Xu
# @Email : xuruizhu@yeah.net
# @File : homework3.py
# @Software: PyCharm
'''
有一组用户的登录信息存储在字典 login_info 里面,
字典格式如下:login_info={"admin":"root","user_1":"123456"}
key表示用户名,value表示密码,请编写函数满足如下条件:
1)设计1个登陆的程序, 不同的用户名和对成密码存在个字典里面, 输入正确的用户名和密码去登陆,
2)首先输入用户名,如果用户名不存在或者为空,则一直提示输入正确的用户名
3)当用户名正确的时候,提示去输入密码,如果密码跟用户名不对应,则提示密码错误请重新输入。
4)如果密码输入错误超过三次,中断程序运行。
5)当输入密码错误时,提示还有几次机会
6)用户名和密码都输入正确的时候,提示登陆成功!
'''
login_info={"admin":"root","user_1":"123456"}
username = input("请输入用户名:")
while username not in login_info.keys() or False:
username = input("请输入正确的用户名:")
pwd = input("请输入密码:")
def fac(pwd):
i = 3
while i <= 3:
if pwd in login_info[username]:
print("登录成功")
break
elif i == 0:
break
print("密码错误,还有{}次机会".format(i))
pwd = input("请重新输入密码:")
i -= 1
if pwd in login_info[username]:
print("登录成功")
else:
fac(pwd)
|
[
"xuruizhu@yeah.net"
] |
xuruizhu@yeah.net
|
ef9b12909b95ce4e5116906a628c74c5eac7abc0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02866/s920983279.py
|
fb63213ca0825c257ed3b28e23543535b9e73049
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, tan, asin, acos, atan, radians, degrees, log2, gcd
from itertools import accumulate, permutations, combinations, combinations_with_replacement, product, groupby
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left, insort, insort_left
from heapq import heappush, heappop
from functools import reduce
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
def ZIP(n): return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
#mod = 10 ** 9 + 7
mod = 998244353
from decimal import *
#import numpy as np
#decimal.getcontext().prec = 10
N = INT()
D = LIST()
d = Counter(D)
if D[0] != 0 or 2 <= d[0]:
print(0)
exit()
ans = 1
for i in range(1, max(d.keys())+1):
ans = ans * pow(d[i-1], d[i], mod) % mod
tmp = d[i]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f78a25cb4f415024b1c9170cad8fd8e15dfcd751
|
f17c78929df552050b3769611b5dfed1b942aa5d
|
/Learning Records and Essays/python/program.py
|
2ab8f506897b246b05560536ba744caeb203d7d9
|
[] |
no_license
|
chirsxjh/My-project
|
f5d4d45e2673898f5fe7aace0d3101efdf847841
|
b9644f268f8c3ec22f47cc7b0a61b66572f1f67a
|
refs/heads/master
| 2022-12-03T16:26:37.391733
| 2020-06-08T07:43:51
| 2020-06-08T07:43:51
| 103,375,743
| 1
| 0
| null | 2022-11-22T01:06:51
| 2017-09-13T08:43:16
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
'''
def ADD(x, list=[]):
list.append(x)
return list
list1 = ADD(10)
list2 = ADD(123, [])
list3 = ADD('a')
print "list1 = %s" %list1
print "list2 = %s" %list2
print "list3 = %s" %list3
'''
a = ['天','干','地','址']
a.reverse()
print (a)
b = ''
for i in a:
b = b + str(i)
print (b)
|
[
"1131360171@qq.com"
] |
1131360171@qq.com
|
c816092f5dd0202ad4d6b7c5b4abd35bbfb25cf2
|
77e303d8353170f4181ab9ff66ac77cb57d46caf
|
/src/629A.py
|
1c9f11924c54c36ccc22a0416876514f665be01c
|
[
"MIT"
] |
permissive
|
viing937/codeforces
|
14f689f2e3360939912e927fb830c69f7116b35c
|
5bd8c2bec0e48cb2b4830c26849ea7fda447267c
|
refs/heads/master
| 2022-09-25T19:51:03.891702
| 2022-08-15T15:32:54
| 2022-08-15T15:32:54
| 32,905,529
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
n = int(input())
c = [list(input()) for i in range(n)]
ans = 0
for i in range(n):
t = c[i].count('C')
ans += t*(t-1)//2
c = list(zip(*c))
for i in range(n):
t = c[i].count('C')
ans += t*(t-1)//2
print(ans)
|
[
"viing937@gmail.com"
] |
viing937@gmail.com
|
23b6e3836ef36023c6255d9903431b590aaea51e
|
e3565e1ce607f60745f2a045aae8026661a6b99b
|
/resources/Onyx-1.0.511/py/onyx/signalprocessing/vocalsource.py
|
9863cde71cd98b28ea7fe3c186ac6c9216905a01
|
[
"Apache-2.0"
] |
permissive
|
eternity668/speechAD
|
4c08d953b2ed06b3357b1c39d8709dd088a2471c
|
f270a1be86372b7044615e4fd82032029e123bc1
|
refs/heads/master
| 2021-01-12T22:10:33.358500
| 2014-02-03T16:03:28
| 2014-02-03T16:03:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,268
|
py
|
###########################################################################
#
# File: vocalsource.py (directory: py/onyx/signalprocessing)
# Date: 2008-07-21 Mon 18:01:50
# Author: Hugh Secker-Walker
# Description: Toying around with one-poles for a vocal source
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2007 - 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
Use of coincident one-pole filters to generate reasonable, reversed,
glotal pulse waveforms.
"""
def make_one_pole(alpha):
def one_pole(alpha):
one_minus_alpha = 1 - alpha
y_n = 0
while True:
x = yield y_n
y_n = alpha * y_n + one_minus_alpha * x
send = one_pole(alpha).send
send(None)
return send
def chain(seq):
seq = tuple(seq)
def gen():
x = None
while True:
x = yield x
for h in seq:
x = h(x)
send = gen().send
send(None)
return send
def test():
"""
>>> op = chain(make_one_pole(0.8) for i in xrange(2))
>>> for x in (1,) + 30 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
>>> for x in (1,) + 25 * (0,) + (.25,) + 30 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
>>> op = chain(make_one_pole(0.6) for i in xrange(3))
>>> for x in (1,) + 20 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
"""
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
|
[
"nassos@n12mavra.cs.ntua.gr"
] |
nassos@n12mavra.cs.ntua.gr
|
6e7840f3b7e05d1ddf4e6c47d41b89b1a83737bf
|
a559b7a111bf95aa301080c34766ca0f16aa7329
|
/Programmers/파이썬을파이썬답게/introduction.py
|
a18bfb35e3fed3b8b4a9cda373735d40f90ef143
|
[] |
no_license
|
arara90/AlgorithmAndDataStructure
|
ccda81d858fdf52aa15d22924b18e7487f659400
|
27280bcc64923f966b854f810560c51e08f3c5be
|
refs/heads/master
| 2022-07-15T05:02:17.157871
| 2022-06-29T14:13:54
| 2022-06-29T14:13:54
| 190,366,467
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
#나쁜예 => C, java에 가까움
def Badsolution(mylist):
answer=[]
for i in mylist:
answer.append(len(i))
return answer
def solution(mylist):
return list(map(len, mylist))
print(solution([[1,2], [1], [1,2,3]]))
|
[
"ararajo@gmail.com"
] |
ararajo@gmail.com
|
1ad6937e8bdea506f7697898bd9d808c4fa4f815
|
5717eefe96f447e4229a5d6fb3fe92a0a34ad123
|
/SelectionProc/asgi.py
|
a94e39c16311db507e9a5cccf757d9ef20f9c847
|
[] |
no_license
|
nayneshrathod/SelectionProces
|
901b4c8063cc036f31604bf9b7f2b6dec4e36f3e
|
0e28e97952f7f8fa5c8490d9fced5c866d0be41b
|
refs/heads/master
| 2022-09-03T18:35:05.642945
| 2020-05-18T09:51:11
| 2020-05-18T09:51:11
| 264,897,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
ASGI config for SelectionProc project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SelectionProc.settings')
application = get_asgi_application()
|
[
"nayneshrathod@gmail.com"
] |
nayneshrathod@gmail.com
|
1435ac835f13f7cf7b2db59ce4d4a74dd0730448
|
00414b9d72c922b873cc2ebcb4d1ce068de5007f
|
/src/backend/partaj/core/migrations/0039_generate_initial_topic_materialzed_paths.py
|
ada91317724825f40c9b247507671d4440208481
|
[
"MIT"
] |
permissive
|
MTES-MCT/partaj
|
1de9691dc6e7615c1d228a0e39c9208b97222dab
|
22e4afa728a851bb4c2479fbb6f5944a75984b9b
|
refs/heads/main
| 2023-08-07T08:22:30.290701
| 2023-08-04T16:57:38
| 2023-08-04T17:22:26
| 237,007,942
| 4
| 3
|
MIT
| 2023-09-14T19:10:26
| 2020-01-29T14:54:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
# Generated by Django 3.0.5 on 2021-01-11 16:29
from django.db import migrations
def forwards(apps, schema_editor):
"""
As we add explicitly non-nullable Materialized Paths for Topics in 0038, we need to generate
initial values for them once as the app migrates along.
This replaces the insignificant "0000" default set in 0039 with real values.
"""
# Important: this migration must be ran with the Partaj code (especially the Topic model) in
# the state of the code at the time of this commit.
# We cannot use the regular `Topic = apps.get_model("core", "Topic")` to get the correct
# version if the model as we need the custom manager for Topic which is not available from
# `apps.get_model`
from partaj.core.models.unit import Topic
Topic.objects.build_materialized_paths()
def backwards(apps, schema_editor):
"""
As topic Materialized Path fields are added with insignificant values in migration 0038,
we can just ignore them here as they should be removed in a migration that goes to 0037,
and it would break Partaj to remove them and stay at 0038.
"""
pass
class Migration(migrations.Migration):
dependencies = [
("core", "0038_add_materialized_path_to_topics"),
]
operations = [
migrations.RunPython(forwards, reverse_code=backwards),
]
|
[
"me@mbenadda.com"
] |
me@mbenadda.com
|
1e8a3348441cdd66ee1ada2eecdd32cfe1cb121c
|
42e66cd537c357e7cb98081a6ebf80c23a8a8613
|
/.history/real_estate/settings_20201111113422.py
|
b872cb7082440c6f663854d2548a252564c4a520
|
[] |
no_license
|
David-Uk/django-real-estate
|
bec5d38379f32e63110a59a32a10a64b1107adca
|
d2d7f4320d7aadde719a48c4c67eb39c22096e2d
|
refs/heads/main
| 2023-01-24T08:58:58.935034
| 2020-12-08T14:19:55
| 2020-12-08T14:19:55
| 310,417,389
| 0
| 0
| null | 2020-12-08T14:19:57
| 2020-11-05T21:07:38
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
"""
Django settings for real_estate project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1r6gx$qnirqznxr*b^+81t&(s@bwfcwa14zy1+10k=jyn=*tae'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pserver',
'pages',
'listings',
'realtor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'real_estate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'real_estate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'real_estate/static')
]
|
[
"david.ukelere291@gmail.com"
] |
david.ukelere291@gmail.com
|
35ac9fb57d8681c94e0cff9605084217104cdb7b
|
242a8ad0b0939473269a14a02097ede7fe298c80
|
/venv/Scripts/django-admin.py
|
c71cadfc8e6c758496a288e4f8135d1b9e6ba082
|
[] |
no_license
|
Md-Jahid-Hasan/recipe_api
|
f97b2d6c0c5a65c2c52ee572a2885dbae15021e4
|
5815f442820b05ab36747cf52e1b990cd3f6f2d3
|
refs/heads/master
| 2023-04-30T11:44:03.391725
| 2021-05-08T15:31:02
| 2021-05-08T15:31:02
| 365,547,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
#!F:\New folder (2)\Projects\Django\recepi_api\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
[
"jahid15-1905@gmail.com"
] |
jahid15-1905@gmail.com
|
ca84e03841878e26147430f70953307c781e0d13
|
9c73dd3043f7db7c9ec76d560484e99ad134fdb6
|
/students/douglas_klos/lesson01/assignment/pytests/test_integration.py
|
ec086b6cc4992e206ac1bb6bd5d2d8197cbecf22
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/py220-online-201904-V2
|
546b316025b680ca28d24b523663095398616b13
|
ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56
|
refs/heads/master
| 2022-12-10T03:14:25.514630
| 2019-06-11T02:14:17
| 2019-06-11T02:14:17
| 179,139,181
| 1
| 19
| null | 2022-12-08T01:43:38
| 2019-04-02T18:49:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,476
|
py
|
#pylint: disable=W0201
"""Pytest cases for integration testing"""
# Douglas Klos
# April 5th, 2019
# Python 220, Lesson 01
# test_integration.py
from inventory_management.inventory_class import Inventory
from inventory_management.furniture_class import Furniture
from inventory_management.electric_appliances_class import ElectricAppliances
# Feels like integration tests are just rebuilding main
# with predefined values and no user input.
# Validates that the modules work together though I suppose.
# Also 80 width linting does not improve readability
# in a language that is all about readability.
class TestClass():
"""Integration tests for inventory_management
Attributes:
item_chair (dict) : dictionary for chair item
item_microwave (dict) : dictionary for microwave electric appliance
item_sofa (dict) : dictionary for sofa furniture
full_inventory (dict) : dictionary database of above items
inventory_string (str) : string containing all data in full_inventory
"""
def setup_method(self):
"""Initialize before each test method"""
self.item_chair = {}
self.item_microwave = {}
self.item_sofa = {}
self.full_inventory = {}
self.inventory_string = ''
self.item_chair['product_code'] = 100
self.item_chair['description'] = 'Chair'
self.item_chair['market_price'] = 111
self.item_chair['rental_price'] = 11
self.full_inventory[self.item_chair['product_code']] = \
Inventory(**self.item_chair).return_as_dictionary()
self.item_microwave['product_code'] = 200
self.item_microwave['description'] = 'Microwave'
self.item_microwave['market_price'] = 222
self.item_microwave['rental_price'] = 22
self.item_microwave['brand'] = 'Samsung'
self.item_microwave['voltage'] = 230
self.full_inventory[self.item_microwave['product_code']] = \
ElectricAppliances(**self.item_microwave).return_as_dictionary()
self.item_sofa['product_code'] = 300
self.item_sofa['description'] = 'Sofa'
self.item_sofa['market_price'] = 333
self.item_sofa['rental_price'] = 33
self.item_sofa['material'] = 'Leather'
self.item_sofa['size'] = 'XL'
self.full_inventory[self.item_sofa['product_code']] = \
Furniture(**self.item_sofa).return_as_dictionary()
for item_code in self.full_inventory:
for value in self.full_inventory[item_code].values():
self.inventory_string += f'{value}'
def test_integration_chair(self):
"""Integration test for chair inventory
Verifies that all chair related data is present.
"""
assert str(self.item_chair['product_code']) in self.inventory_string
assert str(self.item_chair['description']) in self.inventory_string
assert str(self.item_chair['market_price']) in self.inventory_string
assert str(self.item_chair['rental_price']) in self.inventory_string
def test_integration_microwave(self):
"""Integration test for microwave electrical applicance
Verifies that all microwave related data is present.
"""
assert str(self.item_microwave['product_code']) in self.inventory_string
assert str(self.item_microwave['description']) in self.inventory_string
assert str(self.item_microwave['market_price']) in self.inventory_string
assert str(self.item_microwave['rental_price'])in self.inventory_string
assert str(self.item_microwave['brand']) in self.inventory_string
assert str(self.item_microwave['voltage']) in self.inventory_string
def test_integration_sofa(self):
"""Integration test for sofa furniture
Verifies that all sofa related data is present.
"""
assert str(self.item_sofa['product_code']) in self.inventory_string
assert str(self.item_sofa['description']) in self.inventory_string
assert str(self.item_sofa['market_price']) in self.inventory_string
assert str(self.item_sofa['rental_price']) in self.inventory_string
assert str(self.item_sofa['material']) in self.inventory_string
assert str(self.item_sofa['size']) in self.inventory_string
def test_full_string(self):
"""Integration test
We build up a string of all the values in the database
then we go through each expected value and remove it.
If there's nothing left at the end then we pass
"""
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['brand']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['voltage']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['material']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['size']), '')
assert self.inventory_string == ''
|
[
"dougklos@gmail.com"
] |
dougklos@gmail.com
|
b0e25979e15ea5efd7427c6038b6c11cb3178ac7
|
dc025df4a433b82c96fa7a4e064f46ecc948d1a2
|
/subsets.py
|
db716e80dbae2dc466bda23b9e555097d492879d
|
[] |
no_license
|
bingh0616/algorithms
|
c9d3babd6cbf3aefd40fa28a3c839c7201f1028c
|
3b16c72d9361c4bb063e4b2789db695f1e0149bf
|
refs/heads/master
| 2021-01-18T14:10:45.125905
| 2015-11-19T06:03:48
| 2015-11-19T06:03:48
| 35,512,148
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# problem description: https://leetcode.com/problems/subsets/
class Solution:
    """Recursive power-set construction (LeetCode 78: Subsets)."""

    def subsets(self, nums):
        """Return every subset of *nums*.

        :param nums: list of integers; sorted in place before recursion
        :return: list of lists, one per subset (2**len(nums) entries)
        """
        nums.sort()
        return self.helper(nums)

    def helper(self, nums):
        """Build subsets of nums by extending each subset of nums[1:]
        with and without the head element."""
        if not nums:
            return [[]]
        head, rest = nums[0], nums[1:]
        result = []
        for subset in self.helper(rest):
            # Keep original interleaving: subset without head, then with it.
            result.extend((subset, [head] + subset))
        return result
class Solution:
    """Iterative power-set construction (LeetCode 78: Subsets)."""

    def subsets(self, nums):
        """Return every subset of *nums* (sorted in place first).

        Starts from the empty subset and, for each value, appends a copy of
        every subset collected so far extended with that value.
        """
        nums.sort()
        collected = [[]]
        for value in nums:
            collected = collected + [subset + [value] for subset in collected]
        return collected
|
[
"bingh0616@gmail.com"
] |
bingh0616@gmail.com
|
0e9cd66bbd0054c01e59c474b682f77a6ffa0e06
|
70f5f279e051360310f95be895320d8fa6cd8d93
|
/extraPackages/matplotlib-3.0.2/examples/text_labels_and_annotations/text_fontdict.py
|
ad6fa8cc972b272be0283fbb0c838ad95c23a5c4
|
[
"BSD-3-Clause"
] |
permissive
|
spacetime314/python3_ios
|
4b16ab3e81c31213b3db1e1eb00230621b0a7dc8
|
e149f1bc2e50046c8810f83dae7739a8dea939ee
|
refs/heads/master
| 2020-05-09T20:39:14.980041
| 2019-04-08T15:07:53
| 2019-04-08T15:07:53
| 181,415,024
| 2
| 0
|
BSD-3-Clause
| 2019-04-15T05:00:14
| 2019-04-15T05:00:12
| null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
"""
=======================================================
Controlling style of text and labels using a dictionary
=======================================================
This example shows how to share parameters across many text objects and labels
by creating a dictionary of options passed across several functions.
"""
import numpy as np
import matplotlib.pyplot as plt

# One options dictionary shared by the title, annotation and both axis
# labels -- passed to each call via the ``fontdict`` parameter.
text_style = {'family': 'serif',
              'color': 'darkred',
              'weight': 'normal',
              'size': 16,
              }

t = np.linspace(0.0, 5.0, 100)
signal = np.cos(2*np.pi*t) * np.exp(-t)

plt.plot(t, signal, 'k')
plt.title('Damped exponential decay', fontdict=text_style)
plt.text(2, 0.65, r'$\cos(2 \pi t) \exp(-t)$', fontdict=text_style)
plt.xlabel('time (s)', fontdict=text_style)
plt.ylabel('voltage (mV)', fontdict=text_style)

# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
|
[
"nicolas.holzschuch@inria.fr"
] |
nicolas.holzschuch@inria.fr
|
b9b7099438fdf0d48829c0a48561dbbc3874bb41
|
865bd0c84d06b53a39943dd6d71857e9cfc6d385
|
/200-number-of-islands/number-of-islands.py
|
5fc729f5608542946c98442cec6944f6321aa2a1
|
[] |
no_license
|
ANDYsGUITAR/leetcode
|
1fd107946f4df50cadb9bd7189b9f7b7128dc9f1
|
cbca35396738f1fb750f58424b00b9f10232e574
|
refs/heads/master
| 2020-04-01T18:24:01.072127
| 2019-04-04T08:38:44
| 2019-04-04T08:38:44
| 153,473,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
#
#
# Input:
# 11110
# 11010
# 11000
# 00000
#
# Output: 1
#
#
# Example 2:
#
#
# Input:
# 11000
# 11000
# 00100
# 00011
#
# Output: 3
#
class Solution:
    """Count connected components of '1' cells (LeetCode 200: Number of
    Islands). Cells are 4-connected; the grid border counts as water."""

    def numIslands(self, grid):
        """Return the number of islands in *grid*.

        :type grid: List[List[str]]
        :rtype: int

        NOTE: destructive -- visited land cells are overwritten in place.
        """
        if not grid:
            return 0
        islands = 0
        for row in range(len(grid)):
            for col in range(len(grid[0])):
                if grid[row][col] == '1':
                    # Sink this whole island, then count it once.
                    self.DFSisland(grid, row, col)
                    islands += 1
        return islands

    def DFSisland(self, grid, i, j):
        """Flood-fill from (i, j), marking every reachable land cell."""
        if not (0 <= i < len(grid) and 0 <= j < len(grid[0])):
            return
        if grid[i][j] == '1':
            # Int 0 never compares equal to '1', so it works as a visited
            # marker (same marker value as the original implementation).
            grid[i][j] = 0
            self.DFSisland(grid, i + 1, j)
            self.DFSisland(grid, i - 1, j)
            self.DFSisland(grid, i, j + 1)
            self.DFSisland(grid, i, j - 1)
|
[
"andyandwei@163.com"
] |
andyandwei@163.com
|
80640c531be3ce486871f2c0e1d8a4fe3315e162
|
297efd4afeb46c0b56d9a975d76665caef213acc
|
/src/core/migrations/0123_auto_20191204_1711.py
|
81bd185dc755bfa58df30b1e75a3d95810624907
|
[
"MIT"
] |
permissive
|
metabolism-of-cities/metabolism-of-cities-platform-v3
|
67716c3daae86a0fe527c18aef26ce29e069cbcc
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
refs/heads/master
| 2022-12-06T22:56:22.207853
| 2020-08-25T09:53:51
| 2020-08-25T09:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
# Generated by Django 2.2.7 on 2019-12-04 17:11
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.7, 2019-12-04).

    Adds the ``MethodCategory`` model and links ``Method`` to it through a
    new nullable ``category`` foreign key plus a ``method_class`` choice
    field. Do not edit by hand unless you know the migration has not been
    applied anywhere.
    """

    # Must run after the reference-import migration it builds on.
    dependencies = [
        ('core', '0122_reference_cityloops_comments_import'),
    ]

    operations = [
        # New lookup table for method categories.
        migrations.CreateModel(
            name='MethodCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', tinymce.models.HTMLField(blank=True, null=True, verbose_name='description')),
            ],
        ),
        # Single-character coded classification of a method.
        migrations.AddField(
            model_name='method',
            name='method_class',
            field=models.CharField(blank=True, choices=[('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts')], max_length=1, null=True),
        ),
        # Optional link from Method to the new MethodCategory table.
        migrations.AddField(
            model_name='method',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.MethodCategory'),
        ),
    ]
|
[
"paul@penguinprotocols.com"
] |
paul@penguinprotocols.com
|
3800854302dd23a2b91645a282642a890b8aa887
|
4809471274d6e136ac66d1998de5acb185d1164e
|
/pypureclient/flasharray/FA_2_5/models/array_connection_path_response.py
|
de0fc1399cbe7b264e3b2ab8d767e8ab33deda4c
|
[
"BSD-2-Clause"
] |
permissive
|
astrojuanlu/py-pure-client
|
053fef697ad03b37ba7ae21a0bbb466abf978827
|
6fa605079950765c316eb21c3924e8329d5e3e8a
|
refs/heads/master
| 2023-06-05T20:23:36.946023
| 2021-06-28T23:44:24
| 2021-06-28T23:44:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,193
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class ArrayConnectionPathResponse(object):
    """
    Swagger-generated response model wrapping a list of ArrayConnectionPath
    items.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'items': 'list[ArrayConnectionPath]'
    }
    attribute_map = {
        'items': 'items'
    }
    # No constructor arguments are mandatory for this model.
    required_args = {
    }
    def __init__(
        self,
        items=None,  # type: List[models.ArrayConnectionPath]
    ):
        """
        Keyword args:
            items (list[ArrayConnectionPath])
        """
        # Only set the attribute when supplied; an unset attribute is left
        # as the class-level Property placeholder (see __getattribute__).
        if items is not None:
            self.items = items
    def __setattr__(self, key, value):
        # Reject any attribute name not declared in attribute_map.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ArrayConnectionPathResponse`".format(key))
        self.__dict__[key] = value
    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        # A Property placeholder means "never set": surface it as a missing
        # attribute rather than leaking the placeholder object.
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # NOTE(review): dead branch while the class derives from object; if
        # it ever subclassed dict, self.items() would collide with the
        # `items` model attribute assigned in __init__.
        if issubclass(ArrayConnectionPathResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ArrayConnectionPathResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hubert.chan@purestorage.com"
] |
hubert.chan@purestorage.com
|
e76448cde7ed936c5dde66db96302816ecafc4b1
|
36bdbbf1be53ba5f09b9a2b1dd15e91f8f6b0da1
|
/learn/migrations/0004_auto_20181116_1649.py
|
2880faadffc2f92d5d293b247452f5857c030eaf
|
[] |
no_license
|
phufoxy/fotourNew
|
801ab2518424118020dc6e5f31a7ba90a654e56a
|
6048c24f5256c8c5a0d18dc7b38c106a7c92a29c
|
refs/heads/master
| 2023-04-13T01:34:22.510717
| 2018-12-26T03:46:09
| 2018-12-26T03:46:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 649
|
py
|
# Generated by Django 2.1 on 2018-11-16 09:49
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the default of two date fields.

    NOTE(review): ``default=datetime.datetime(2018, 11, 16, ...)`` is a
    fixed timestamp captured when ``makemigrations`` ran -- it does NOT mean
    "now" at insert time (that would require ``auto_now_add`` or
    ``default=timezone.now``). Presumably unintended; verify against the
    model definitions.
    """

    dependencies = [
        ('learn', '0003_auto_20181116_1648'),
    ]

    operations = [
        migrations.AlterField(
            model_name='speak',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2018, 11, 16, 16, 49, 23, 870624)),
        ),
        migrations.AlterField(
            model_name='taskspeak',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2018, 11, 16, 16, 49, 23, 870624)),
        ),
    ]
|
[
"vanphudhsp2015@gmail.com"
] |
vanphudhsp2015@gmail.com
|
41d11890310ec3f84d4e94f421d3c69ba64b9cd6
|
5af277b5819d74e61374d1d78c303ac93c831cf5
|
/tcc/evaluate.py
|
1b7afa40c2f407e4510e06c080a912498d6e67ae
|
[
"Apache-2.0"
] |
permissive
|
Ayoob7/google-research
|
a2d215afb31513bd59bc989e09f54667fe45704e
|
727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7
|
refs/heads/master
| 2022-11-11T03:10:53.216693
| 2020-06-26T17:13:45
| 2020-06-26T17:13:45
| 275,205,856
| 2
| 0
|
Apache-2.0
| 2020-06-26T16:58:19
| 2020-06-26T16:58:18
| null |
UTF-8
|
Python
| false
| false
| 5,634
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate embeddings on downstream tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import tensorflow.compat.v2 as tf
from tcc.algorithms import get_algo
from tcc.config import CONFIG
from tcc.datasets import create_dataset
from tcc.datasets import create_one_epoch_dataset
from tcc.tasks import get_tasks
from tcc.utils import get_embeddings_dataset
from tcc.utils import get_lr_opt_global_step
from tcc.utils import restore_ckpt
from tcc.utils import setup_eval_dir
# Shorthand alias for Keras layers.
layers = tf.keras.layers

# Command-line flags controlling evaluation behaviour.
flags.DEFINE_boolean('continuous_eval', True, 'Evaluate continously.')
flags.DEFINE_string('logdir', '/tmp/alignment_logs', 'Path to logs.')
flags.DEFINE_boolean('defun', True, 'Defun everything!')
flags.DEFINE_boolean('visualize', False, 'Visualize images. Switched off by '
                     'for default to speed traininig up and take less memory.')
flags.DEFINE_integer(
    'max_embs', 0, 'Max number of videos to embed. 0 or less '
    'means embed all videos in dataset.')
FLAGS = flags.FLAGS

# Set to True by evaluate_once after the checkpoint written at
# CONFIG.TRAIN.MAX_ITERS has been evaluated; timeout_fn reads it to end
# the continuous-eval loop.
evaluated_last_ckpt = False
def evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
                  summary_writer):
  """Evaluate learnt embeddings on downstream tasks.

  Restores the latest checkpoint, runs every iterator-based task and every
  embedding-based task (the latter once per dataset in CONFIG.DATASETS),
  and writes all metrics to ``summary_writer``.

  Args:
    algo: training algorithm object; ``algo.model`` holds the checkpointed
      model variables.
    iterator_tasks: dict of name -> task evaluated directly on iterators.
    embedding_tasks: dict of name -> task evaluated on precomputed
      embedding datasets.
    iterators: dict with 'train_iterator'/'val_iterator' (may be empty).
    summary_writer: tf.summary writer receiving the metric scalars.
  """
  # Sets up model for training.
  _, optimizer, global_step = get_lr_opt_global_step()
  restore_ckpt(logdir=CONFIG.LOGDIR, optimizer=optimizer, **algo.model)
  # Record that the final training checkpoint has been seen so the
  # continuous-eval loop (see timeout_fn) can terminate.
  if global_step.numpy() == CONFIG.TRAIN.MAX_ITERS:
    global evaluated_last_ckpt
    evaluated_last_ckpt = True
  metrics = {}
  if iterator_tasks:
    with summary_writer.as_default():
      with tf.summary.record_if(True):
        for task_name, task in iterator_tasks.items():
          metrics[task_name] = task.evaluate(algo, global_step,
                                             iterators=iterators)
  # 0 or negative flag value means "embed everything".
  max_embs = None if FLAGS.max_embs <= 0 else FLAGS.max_embs
  if embedding_tasks:
    frames_per_batch = CONFIG.EVAL.FRAMES_PER_BATCH
    for dataset_name in CONFIG.DATASETS:
      # Build one-epoch train/val embedding datasets for this dataset.
      dataset = {'name': dataset_name}
      train_iterator = create_one_epoch_dataset(
          dataset_name,
          'train',
          mode='eval',
          path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
      dataset['train_dataset'] = get_embeddings_dataset(
          algo.model, train_iterator, frames_per_batch=frames_per_batch,
          max_embs=max_embs)
      val_iterator = create_one_epoch_dataset(
          dataset_name,
          'val',
          mode='eval',
          path_to_tfrecords=CONFIG.PATH_TO_TFRECORDS)
      dataset['val_dataset'] = get_embeddings_dataset(
          algo.model, val_iterator, frames_per_batch=frames_per_batch,
          max_embs=max_embs)
      with summary_writer.as_default():
        with tf.summary.record_if(True):
          for task_name, task in embedding_tasks.items():
            if task_name not in metrics:
              metrics[task_name] = {}
            metrics[task_name][dataset_name] = task.evaluate(
                algo, global_step, embeddings_dataset=dataset)
    # Add all metrics in a separate tag so that analysis is easier.
    with summary_writer.as_default():
      with tf.summary.record_if(True):
        for task_name in embedding_tasks.keys():
          for dataset in CONFIG.DATASETS:
            tf.summary.scalar('metrics/%s_%s' % (dataset, task_name),
                              metrics[task_name][dataset],
                              step=global_step)
          # Per-task average over all datasets.
          avg_metric = sum(metrics[task_name].values())
          avg_metric /= len(CONFIG.DATASETS)
          tf.summary.scalar('metrics/all_%s' % task_name,
                            avg_metric, step=global_step)
def timeout_fn():
  """Tell tf.train.checkpoints_iterator whether to stop waiting.

  Returns True once evaluate_once has processed the final checkpoint
  (module flag ``evaluated_last_ckpt``), ending the continuous-eval loop.
  """
  return evaluated_last_ckpt
def evaluate():
  """Evaluate embeddings.

  Reads flags/CONFIG, builds the algorithm and tasks, then evaluates either
  continuously (once per new checkpoint in ``--logdir``) or a single time.
  """
  CONFIG.LOGDIR = FLAGS.logdir
  logdir = CONFIG.LOGDIR
  setup_eval_dir(logdir)
  algo = get_algo(CONFIG.TRAINING_ALGO)
  # Optionally compile the algorithm's hot paths with tf.function.
  if FLAGS.defun:
    algo.call = tf.function(algo.call)
    algo.compute_loss = tf.function(algo.compute_loss)
  iterator_tasks, embedding_tasks = get_tasks(CONFIG.EVAL.TASKS)
  # Setup summary writer.
  summary_writer = tf.summary.create_file_writer(
      os.path.join(logdir, 'eval_logs'), flush_millis=10000)
  iterators = {}
  if iterator_tasks:
    # Setup Dataset Iterators from train and val datasets.
    iterators['train_iterator'] = create_dataset('train', mode='eval')
    iterators['val_iterator'] = create_dataset('val', mode='eval')
  if FLAGS.continuous_eval:
    # Re-evaluate whenever a new checkpoint appears; timeout_fn stops the
    # loop after the final checkpoint has been evaluated.
    for _ in tf.train.checkpoints_iterator(logdir, timeout=1,
                                           timeout_fn=timeout_fn):
      evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
                    summary_writer)
  else:
    evaluate_once(algo, iterator_tasks, embedding_tasks, iterators,
                  summary_writer)
def main(_):
  """App entry point: enable TF2 behavior, force inference mode, evaluate."""
  tf.enable_v2_behavior()
  # Learning phase 0 == inference (disables dropout / BN training updates).
  tf.keras.backend.set_learning_phase(0)
  evaluate()


if __name__ == '__main__':
  app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
108773ee0e769f568f2c93989888421559ae51cc
|
07af444dafa5bde373b0730e92d67e455d4ff4df
|
/SFData/StackOverflow/s50483715_ground_truth.py
|
1a82f2ca14d03d0b9d9658ab76abf93061a4d9a0
|
[] |
no_license
|
tensfa/tensfa
|
9114595b58a2e989780af0c348afb89a2abb04b4
|
415dcfaec589b0b14c5b9864872c912f3851b383
|
refs/heads/main
| 2023-06-30T14:27:38.217089
| 2021-08-03T01:33:30
| 2021-08-03T01:33:30
| 368,465,614
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,243
|
py
|
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
# Input image size expected by the network.
img_width, img_height = 150, 150

train_data_dir = os.path.dirname(os.path.realpath(__file__)) + '/../data/generator/train'  # train directory generated by train_cla
validation_data_dir = os.path.dirname(os.path.realpath(__file__)) + '/../data/generator/test'  # validation directory generated by val_cla

# Tiny run configuration: 6 train / 2 validation samples, one epoch.
train_samples = 6
validation_samples = 2
epochs = 1
batch_size = 1
# Single-channel (grayscale) input -- must match color_mode below.
input_shape = (img_width, img_height, 1)

# Build a sequential model to train data: three conv/ReLU/pool stages, then
# a dense head with one sigmoid unit for binary classification.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Train-time augmentation: rescale to [0, 1] plus shear/zoom/flip.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
# Validation data is only rescaled -- no augmentation.
val_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(  # train generator
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    color_mode = 'grayscale')
validation_generator = val_datagen.flow_from_directory(  # validation generator
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    color_mode = 'grayscale')

# Fit the generator to train and validate the model.
model.fit_generator(
    train_generator,
    steps_per_epoch=train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_samples // batch_size)
|
[
"tensfa@yeah.net"
] |
tensfa@yeah.net
|
fe28322254d51f1964fb264c3851963777900a5d
|
7e11a563876a05771152448c8c80cf262f3bbc40
|
/python1基础/day10/lambda.py
|
805a2a35b789dcbb7d86fc3d0d48ab5ad7f3a71a
|
[] |
no_license
|
qiujiandeng/-
|
ee8eb1c828177c9796b3a1bda547aa036c19914d
|
bb376535ff9f2fe23828bee32efb1d9010aa38e6
|
refs/heads/master
| 2020-05-25T11:56:39.471770
| 2019-05-21T07:52:22
| 2019-05-21T07:52:22
| 187,779,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# lambda.py -- demo: a lambda is an anonymous function whose body is a
# single expression; the equivalent ``def`` form is shown for contrast:
#
#   def myadd(x, y):
#       return x + y
myadd = lambda x, y: x + y  # lambda body may only be an expression
# Examples of valid lambda bodies:
#   x + y
#   max("abc")
#   print("hello")
#   True if x > y else False
#   [x**2 for x in range(10)]
print("20+30=", myadd(20, 30))  # 50
print("1+2=", myadd(1, 2))  # 3
|
[
"961167272@qq.com"
] |
961167272@qq.com
|
371f962ddc772c29c413d8755c2a6f5596366739
|
d6c117812a618ff34055488337aaffea8cf81ca1
|
/serve/Servr-Servr-desktop-edition/Servr - desktop edition.py
|
66b42077ca95dfcca1875a39f12bbabe7a15c24f
|
[
"Apache-2.0"
] |
permissive
|
c0ns0le/Pythonista
|
44829969f28783b040dd90b46d08c36cc7a1f590
|
4caba2d48508eafa2477370923e96132947d7b24
|
refs/heads/master
| 2023-01-21T19:44:28.968799
| 2016-04-01T22:34:04
| 2016-04-01T22:34:04
| 55,368,932
| 3
| 0
| null | 2023-01-22T01:26:07
| 2016-04-03T21:04:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,121
|
py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import mimetypes
import os
# Saved-settings filename and the directory whose files are served.
config_filename = 'Config.txt'
source_directory = 'Resources'
def get_contents_of_file(filepath):
    """Return the entire text contents of the file at *filepath*."""
    with open(filepath) as source:
        contents = source.read()
    return contents
def get_files_dict(directory):
    """Map each filename in *directory* to that file's full text contents."""
    files = {}
    for entry in os.listdir(directory):
        files[entry] = get_contents_of_file(os.path.join(directory, entry))
    return files
print('Welcome to Servr - desktop edition!')

# Touch the config file so the read below never fails on a missing file.
with open(config_filename, 'a+') as in_file:
    pass  # if config file does not already exist, create one

# Config layout (one value per line): auto-start flag ('y'/'n'), homepage
# filename, bind address, port.
config = get_contents_of_file(config_filename).split('\n')
do_auto_start = config[0].lower() if config else 'n'
if do_auto_start == 'y':
    print("Getting data from {}...".format(config_filename))
    filename, address, port = config[1:4]
else:
    # NOTE(review): raw_input is Python 2 only -- this script will not run
    # unmodified under Python 3 (where it is input()).
    filename = raw_input("Enter homepage HTML file name including extension:").strip()
    address = raw_input("Enter this device's private IP address:").strip()
    port = raw_input("Enter an unused port:").strip()
    if filename and address and port:
        msg = "Save these values into {}? (No)".format(config_filename)
        save_to_cfg = (raw_input(msg).strip().lower() or 'n')[0]
        if save_to_cfg == 'y':
            # Persist with the auto-start flag set and a trailing newline.
            with open(config_filename, 'w') as out_file:
                out_file.write('\n'.join(['y', filename, address, port, '']))

# Preload the homepage and every resource file into memory.
htmlData = get_contents_of_file(os.path.join(source_directory, filename))
files_dict = get_files_dict(source_directory)
def host(environ, start_response):
    """WSGI application: serve the preloaded homepage for root-ish paths,
    any file from files_dict by name, or a 404 otherwise."""
    mime_type = 'text/html'
    status = '200 OK'
    requested = environ.get('PATH_INFO', None)
    if requested in (None, '/', '/home', '/index.html'):
        body = htmlData
    else:
        requested = requested.strip('/')
        body = files_dict.get(requested, None)
        if body:
            # Pick the content type from the file extension.
            mime_type = mimetypes.guess_type(requested)[0]
        else:
            body = status = '404 Not Found'
    start_response(status, [('Content-type', mime_type)])
    return [body]
# Bind the WSGI app to the configured address/port and serve until killed.
webServer = make_server(address, int(port), host)
print('Serving at url: http://{}:{}'.format(address, port))
webServer.serve_forever()
|
[
"itdamdouni@gmail.com"
] |
itdamdouni@gmail.com
|
0cf3d5c34ba015e36ea3bcae5895e8dd880d5346
|
14557ac358f90f9f055d9072e14ba494d565f072
|
/tool/gaussianElimination.py
|
95ae3fe91b949bae52269de21a66ffda95dbfd1a
|
[] |
no_license
|
August-us/exam
|
d8143359aa85bd8f4047024f3e9f83b662fa23bb
|
092a800a15bdd0f3d0c8f521a5e0fc90f964e8a8
|
refs/heads/master
| 2021-07-01T04:16:15.297648
| 2021-06-26T15:14:31
| 2021-06-26T15:14:31
| 203,525,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,542
|
py
|
import numpy as np
def gaussianElimination(A, b):
    """Reduce the augmented system [A | b] with Gaussian elimination.

    :param A: 2-d coefficient matrix (any array-like)
    :param b: right-hand-side vector, one entry per row of A
    :return: the augmented matrix as an upper-triangular ``np.matrix``;
        each pivot row is normalized (pivot == 1) at elimination time
    :raises AssertionError: if A is not 2-d or A and b have mismatched rows
    """
    # Bug fix: ``np.float`` was deprecated in NumPy 1.20 and removed in
    # 1.24 -- use the builtin ``float`` (same dtype, float64).
    A = np.matrix(A, dtype=float)
    b = np.array(b)
    assert len(A.shape) == 2, "Coefficient matrix is not 2d. "
    assert A.shape[0] == b.shape[0], "Coefficient and b do not match."
    A = np.concatenate([A, b[:, None]], axis=1)
    for i in range(b.shape[0]):
        # Find the first row at or below i with a nonzero entry in column i,
        # normalize it so its pivot becomes 1, and swap it into position i.
        for k in range(i, A.shape[0]):
            if A[k, i]:
                A[i, :], A[k, :] = A[k, :] / A[k, i], A[i, :]
                break
        else:
            # Whole column is zero from row i down: nothing to eliminate.
            continue
        # Zero out column i below the pivot (upper-triangular form).
        A[i + 1:, i:] -= (A[i + 1:, i] / A[i, i]) * A[i, i:]
    return A
def _solve(A):
    """Back-substitute on the reduced augmented matrix from
    gaussianElimination.

    :param A: upper-triangular augmented matrix [A | b]
    :return: the solution vector x when the system is determined;
        otherwise a tuple ``(x, k)`` (see NOTE on the else branch)
    :raises AssertionError: on a zero pivot ("Equations without solution")
    """
    n = A.shape[1] - 1
    # Rows that are not entirely zero; zero rows indicate free variables.
    flag = (A[:n] != 0).any(axis=1)
    if flag.all():  # may have a unique solution
        x = np.zeros(n)
        for i in range(n - 1, -1, -1):
            assert (A[i, i] != 0.), "Equations without solution"
            x[i] = (A[i, n] - np.dot(A[i, :n], x)) / A[i, i]
        return x
    else:
        # Underdetermined: back-substitute only over the nonzero rows.
        k = flag.sum()
        x = np.zeros(n)
        for i in range(k - 1, -1, -1):
            assert (A[i, i] != 0.), "Equations without solution"
            x[i] = (A[i, n] - np.dot(A[i, :n], x)) / A[i, i]
        # NOTE(review): ``k`` is reused here as an identity matrix sized by
        # the number of free variables -- presumably a placeholder for the
        # homogeneous solution basis; verify intent against callers.
        k = np.eye(n - k)
        return (x, k)
def solve(A, b):
    '''
    Solve the linear system A x = b.

    :param A: coefficient matrix
    :param b: constant (right-hand-side) vector
    :return: the unique solution vector when one exists; for a system with
        infinitely many solutions, a tuple of (particular solution,
        general/homogeneous part) as produced by _solve
    '''
    A = gaussianElimination(A, b)
    return _solve(A)
if __name__ == '__main__':
    # Smoke test: a 4x4 system with a unique solution.
    A = [
        [1, 0, 0, 0],
        [1, 1, 3, 3],
        [1, 2, 2, 4],
        [1, 3, 1, 3],
    ]
    b = [4, 18, 24, 26]
    print(solve(A, b))
|
[
"August_us@163.com"
] |
August_us@163.com
|
4960ea804db2bb114054817e0b17cc24f476368c
|
f7e5f77292c2bf595ae034e0a67bf8e01a6e82b1
|
/p957/test_solution.py
|
f63de72edfe3a4b41502aaa4b0da7fa80a572715
|
[] |
no_license
|
carwestsam/leetCode
|
af0d64d8d52597c88441b811ce6609a056ef290e
|
8075fbb40987d5e6af8d30941a19fa48a3320f56
|
refs/heads/master
| 2021-01-20T22:28:59.652051
| 2019-03-22T01:11:30
| 2019-03-22T01:11:30
| 60,820,215
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
from unittest import TestCase
from p957.Solution import Solution
class TestSolution(TestCase):
    """Regression tests for Solution.prisonAfterNDays (LeetCode 957)."""

    def test_prisonAfterNDays(self):
        solver = Solution()
        # (initial cells, days, expected final cells)
        fixed_cases = [
            ([0, 1, 0, 1, 1, 0, 0, 1], 7, [0, 0, 1, 1, 0, 0, 0, 0]),
            ([1, 0, 0, 1, 0, 0, 1, 0], 1000000000, [0, 0, 1, 1, 1, 1, 1, 0]),
        ]
        for cells, days, expected in fixed_cases:
            self.assertEqual(expected, solver.prisonAfterNDays(cells, days))
        # The optimized path must agree with the brute-force reference.
        self.assertEqual(solver.raw([1, 0, 0, 1, 0, 1, 1, 0], 300),
                         solver.prisonAfterNDays([1, 0, 0, 1, 0, 1, 1, 0], 300))
|
[
"carwestsam@gmail.com"
] |
carwestsam@gmail.com
|
b746bc216e8a269465b30ae48c46f3f6e68f5839
|
afd85583cd544f6c909797579f4c28aae89e0828
|
/src/main/seg/common/Term.py
|
6f33143d1a58375ace4f45cc3634404ecd5475bd
|
[] |
no_license
|
chuanfanyoudong/python_hanlp
|
a127f61b0847031677532ea7ec54cdbb65ac7486
|
5417778c51f7a209e8a866b884de6c7a6392b203
|
refs/heads/master
| 2020-04-13T08:50:26.386240
| 2019-01-14T15:10:35
| 2019-01-14T15:10:35
| 163,093,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
"""
@author: zkjiang
@contact: jiang_zhenkang@163.com
@software: PyCharm
@file: Term.py
@time: 2019/1/1 21:49
"""
"""
单词类,用户可以直接访问单词的全部属性
"""
class Term(object):
    """A segmented word: surface form plus part-of-speech tag.

    Attributes are public; callers may read them directly.
    """

    def __init__(self, word, nature):
        self.word = word      # surface form of the word
        self.nature = nature  # part-of-speech tag

    def __str__(self):
        return self.word + "/" + self.nature

    def length(self):
        """Return the number of characters in the word."""
        return len(self.word)

    def getFrequency(self):
        """Return the word's frequency in the python_hanlp lexicon.

        Lexicon lookup is not implemented yet, so this always returns 0
        (0 meaning "new/unseen word").
        """
        return 0

    def equals(self, obj):
        """Return 1 if obj is a Term with the same word and nature, else 0.

        :param obj: object to compare against
        :return: 1 or 0 (int, matching the original int-flag convention)
        """
        same = (isinstance(obj, Term)
                and self.word == obj.word
                and self.nature == obj.nature)
        return 1 if same else 0
|
[
"qaz3762541@163.com"
] |
qaz3762541@163.com
|
120381ac7f07d8aab59ed666511478e8e1f171ec
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/linux_os_info_fragment.py
|
a7b98b278ea789ddba8dba8d41a36115adb68fee
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LinuxOsInfoFragment(Model):
    """Information about a Linux OS.

    :param linux_os_state: The state of the Linux OS (i.e. NonDeprovisioned,
     DeprovisionRequested, DeprovisionApplied). Possible values include:
     'NonDeprovisioned', 'DeprovisionRequested', 'DeprovisionApplied'
    :type linux_os_state: str or ~azure.mgmt.devtestlabs.models.LinuxOsState
    """

    # Maps the Python attribute name to its wire-format key and msrest type.
    _attribute_map = {
        'linux_os_state': {'key': 'linuxOsState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(LinuxOsInfoFragment, self).__init__(**kwargs)
        # Defaults to None when the caller does not supply a state.
        self.linux_os_state = kwargs.get('linux_os_state', None)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
56ea4ad0b716b48c1a5d574a64c6d641fbb21a2a
|
4a36b5979b0753b32cff3956fd97fb8ed8b11e84
|
/0.23/_downloads/6a455d4d592574555169872fa244fae6/mne_inverse_connectivity_spectrum.py
|
3cc10a8a9c242abf902bf27a70eee7a722a71e14
|
[] |
permissive
|
mne-tools/mne-tools.github.io
|
8aac7ae10bf2faeeb875b9a351a5530dc0e53154
|
495e878adc1ef3374e3db88604504d7542b01194
|
refs/heads/main
| 2023-09-03T07:06:00.660557
| 2023-09-03T04:10:18
| 2023-09-03T04:10:18
| 35,639,371
| 12
| 16
|
BSD-3-Clause
| 2023-05-05T19:04:32
| 2015-05-14T22:04:23
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,460
|
py
|
"""
==============================================================
Compute full spectrum source space connectivity between labels
==============================================================
The connectivity is computed between 4 labels across the spectrum
between 7.5 Hz and 40 Hz.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.connectivity import spectral_connectivity
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Load data
inverse_operator = read_inverse_operator(fname_inv)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Add a bad channel
raw.info['bads'] += ['MEG 2443']
# Pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Compute inverse solution and for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0 # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
pick_ori="normal", return_generator=True)
# Read some labels
names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name)
for name in names]
# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
return_generator=True)
fmin, fmax = 7.5, 40.
sfreq = raw.info['sfreq'] # the sampling frequency
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq,
fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=1)
n_rows, n_cols = con.shape[:2]
fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True)
for i in range(n_rows):
for j in range(i + 1):
if i == j:
axes[i, j].set_axis_off()
continue
axes[i, j].plot(freqs, con[i, j, :])
axes[j, i].plot(freqs, con[i, j, :])
if j == 0:
axes[i, j].set_ylabel(names[i])
axes[0, i].set_title(names[i])
if i == (n_rows - 1):
axes[i, j].set_xlabel(names[j])
axes[i, j].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
axes[j, i].set(xlim=[fmin, fmax], ylim=[-0.2, 1])
# Show band limits
for f in [8, 12, 18, 35]:
axes[i, j].axvline(f, color='k')
axes[j, i].axvline(f, color='k')
plt.tight_layout()
plt.show()
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
3a55742c3f10a2301ca0cb2c6e923ffe93ec7d73
|
5cc3925d0b9790733d67bf1ae74f5d2db9c07c91
|
/ephys_nlm/example_datasets/__init__.py
|
8be94b37d2b0ce1d4f7dedeb20eb9bb3fb312989
|
[
"Apache-2.0"
] |
permissive
|
magland/ephys_nlm
|
a17bb77bbf6e6e6fadc3466695c155acf99684fd
|
e4109c8f123174d9cefe25065ad78e49a4ddb894
|
refs/heads/master
| 2021-07-01T04:07:39.873354
| 2021-05-24T20:24:59
| 2021-05-24T20:24:59
| 237,340,358
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
from .toy_example import toy_example
from .synthesize_random_firings import synthesize_random_firings
from .synthesize_random_waveforms import synthesize_random_waveforms
from .synthesize_timeseries import synthesize_timeseries
|
[
"jmagland@flatironinstitute.org"
] |
jmagland@flatironinstitute.org
|
8d7ea9d63120eb522d967efccb92a92d86189d5a
|
66634946aec18840c00b0e568c41faf3e9f473e7
|
/Level2/Lessons17686/gamjapark.py
|
162143a6889a0bd6b0edd4c59ff9e13c0cc3a062
|
[
"MIT"
] |
permissive
|
StudyForCoding/ProgrammersLevel
|
0525521b26ad73dcc1fe58a1b2f303b613c3a2f6
|
dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25
|
refs/heads/main
| 2023-08-14T23:15:53.108351
| 2021-10-05T16:04:32
| 2021-10-05T16:04:32
| 354,728,963
| 0
| 1
|
MIT
| 2021-10-05T16:04:33
| 2021-04-05T05:26:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
# [3차] 파일명 정렬
import re
def solution(files):
    """Kakao 'file name sort': order file names by case-insensitive HEAD,
    then by the numeric value of the first digit run (at most five digits
    are significant), keeping input order for ties (stable sort).
    """
    decorated = []
    for original in files:
        # re.split with a capturing group keeps the separators, so
        # pieces[0] is the HEAD and pieces[1] the first NUMBER run.
        pieces = re.split(r'(\d+)', original)
        decorated.append((pieces[0].lower(), int(pieces[1][:5]), original))
    decorated.sort(key=lambda entry: (entry[0], entry[1]))
    return [entry[2] for entry in decorated]
'''
채점을 시작합니다.
정확성 테스트
테스트 1 〉 통과 (0.16ms, 10.4MB)
테스트 2 〉 통과 (0.15ms, 10.3MB)
테스트 3 〉 통과 (2.56ms, 10.9MB)
테스트 4 〉 통과 (3.39ms, 10.8MB)
테스트 5 〉 통과 (2.93ms, 10.7MB)
테스트 6 〉 통과 (3.11ms, 10.7MB)
테스트 7 〉 통과 (2.95ms, 10.7MB)
테스트 8 〉 통과 (2.39ms, 10.6MB)
테스트 9 〉 통과 (2.85ms, 10.8MB)
테스트 10 〉 통과 (2.95ms, 10.8MB)
테스트 11 〉 통과 (2.64ms, 10.7MB)
테스트 12 〉 통과 (3.37ms, 11MB)
테스트 13 〉 통과 (2.61ms, 11MB)
테스트 14 〉 통과 (3.47ms, 11.2MB)
테스트 15 〉 통과 (3.47ms, 11.2MB)
테스트 16 〉 통과 (3.07ms, 10.9MB)
테스트 17 〉 통과 (2.53ms, 10.9MB)
테스트 18 〉 통과 (2.60ms, 10.7MB)
테스트 19 〉 통과 (2.83ms, 10.7MB)
테스트 20 〉 통과 (3.15ms, 10.9MB)
채점 결과
정확성: 100.0
합계: 100.0 / 100.0
'''
|
[
"gojang4@gmail.com"
] |
gojang4@gmail.com
|
87dc014de771f52f91b8ca34703b75633023e3d9
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/sets_20200605191738.py
|
d7dbc86902639a050395ada6e3df8d94816b350c
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
def Strings(str):
    # NOTE(review): unfinished draft — `values` and `newArray` are never used,
    # the list returned by i.split(':') is discarded, and the function
    # implicitly returns None. Presumably each "KEY:VALUE" entry was meant to
    # be parsed and aggregated; confirm the intended output before relying on
    # this. Also, the parameter shadows the builtin `str`.
    values = {}
    newArray = []
    for i in str:
        i.split(':')
Strings(["A:1","B:3","C:3","A:4","B:2"])
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
dad188cc567965957bad39f4ac1d5ce4fc0d2b77
|
5b8073c92445d9934f56c8f4b1df29f6bae83c75
|
/.history/app_20190813205014.py
|
c5ff78835facedfcdf4559d07eeda2e91d305b25
|
[] |
no_license
|
lodorg/bcrest
|
60dd80fd53158038fedcecc00f32965722a4f6dc
|
b44b84bc5b5c80f50e2385ed504107f4e0134f4e
|
refs/heads/master
| 2022-02-25T22:14:42.097130
| 2019-08-13T17:40:30
| 2019-08-13T17:40:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
from flask import Flask, request, flash, redirect
from werkzeug import secure_filename
import os
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'imgs'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
    """Return True when *filename* has an extension (text after the last
    dot, case-insensitive) listed in ALLOWED_EXTENSIONS."""
    _, dot, extension = filename.rpartition('.')
    return bool(dot) and extension.lower() in ALLOWED_EXTENSIONS
@app.route('/uploader', methods = ['GET', 'POST'])
def upload_file():
    """Accept a single multipart upload posted in form field 'file'.

    Returns a plain-text status string. Fixes vs. the original:
    - flash() calls removed: no app.secret_key is configured anywhere in
      this file, so every flash() raised RuntimeError at request time; the
      flashed text was never rendered anyway (each branch returns a plain
      string immediately).
    - the upload folder is created on demand so save() cannot fail with
      FileNotFoundError on first run.
    - GET requests now receive a response instead of returning None
      (which made Flask raise).
    """
    if request.method == 'POST':
        # Request without a 'file' part at all (e.g. wrong form enctype).
        if 'file' not in request.files:
            return "No file to uploaded"
        upload = request.files['file']
        # Browsers submit an empty filename when no file was selected.
        if upload.filename == '':
            return "No file to uploaded"
        if upload and allowed_file(upload.filename):
            # secure_filename() strips path separators and unsafe characters.
            target_name = secure_filename(upload.filename)
            os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
            upload.save(os.path.join(app.config['UPLOAD_FOLDER'], target_name))
            return "Success"
        return "Not allowed type file"
    return "POST a file in form field 'file' to upload it"
if __name__ == '__main__':
app.run(debug = True)
|
[
"probirds123@gmail.com"
] |
probirds123@gmail.com
|
3e6d34849453d3637b8ab09d1c78e6147d34e310
|
0ba1743e9f865a023f72a14d3a5c16b99ee7f138
|
/problems/test_0190.py
|
ad62eb3b35a711a2e1a2cf135cd8719826a02c15
|
[
"Unlicense"
] |
permissive
|
chrisxue815/leetcode_python
|
d0a38a4168243b0628256825581a6df1b673855c
|
a33eb7b833f6998972e5340d383443f3a2ee64e3
|
refs/heads/main
| 2022-06-20T15:09:27.221807
| 2022-06-02T21:55:35
| 2022-06-02T21:55:35
| 94,590,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import unittest
class Solution:
    # @param n, an integer
    # @return an integer
    def reverseBits(self, n):
        """Reverse the bits of a 32-bit unsigned integer.

        Plain bit-by-bit form: shift the low bit of n into the result,
        32 times. Equivalent to the divide-and-conquer swap network used
        in Hacker's Delight / OpenJDK Integer.reverse().
        """
        reversed_bits = 0
        for _ in range(32):
            reversed_bits = (reversed_bits << 1) | (n & 1)
            n >>= 1
        return reversed_bits
class Test(unittest.TestCase):
    """Unit tests for Solution.reverseBits."""
    def test(self):
        # Known pairs: 0b10 -> 0x40000000, plus the LeetCode 190 sample.
        self._test(2, 0x40000000)
        self._test(43261596, 964176192)
    def _test(self, n, expected):
        # Helper: reverse n's 32 bits on a fresh Solution and compare.
        actual = Solution().reverseBits(n)
        self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
[
"chrisxue815@gmail.com"
] |
chrisxue815@gmail.com
|
fc7b97550c2640523513a8ada604f94e0c97df40
|
ac5d55e43eb2f1fb8c47d5d2a68336eda181d222
|
/Greedy/392. Is Subsequence.py
|
439737d72f004b8f25d690b2355b677978a02aa8
|
[] |
no_license
|
tinkle1129/Leetcode_Solution
|
7a68b86faa37a3a8019626e947d86582549374b3
|
1520e1e9bb0c428797a3e5234e5b328110472c20
|
refs/heads/master
| 2021-01-11T22:06:45.260616
| 2018-05-28T03:10:50
| 2018-05-28T03:10:50
| 78,925,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,303
|
py
|
# - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: shutingnjupt@gmail.com
# Name: Is Subsequence.py
# Creation Time: 2017/9/25
###########################################
'''
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Example 1:
s = "abc", t = "ahbgdc"
Return true.
Example 2:
s = "axc", t = "ahbgdc"
Return false.
'''
class Solution(object):
    def isSubsequence(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool

        Greedy single pass over t: a shared iterator is consumed by the
        `in` test, so each character of s must be found strictly after
        the previous one. Same two-pointer semantics as the index-based
        version, O(len(t)) time.
        """
        remaining = iter(t)
        return all(ch in remaining for ch in s)
# Quick manual check (Python 2 print-statement syntax).
S = Solution()
print S.isSubsequence('abc','ahbgdc')
print S.isSubsequence('axc','ahbgdc')
|
[
"496047829@qq.com"
] |
496047829@qq.com
|
037b9190c649cf36abbd016974d528e2fec1ac1a
|
b1c17f43cb08740f519b0bd32bb93d9095135fc7
|
/sawyer/mujoco/tasks/__init__.py
|
15848b59633bc62899fafbfacf1cf4f8af3bbc6a
|
[
"MIT"
] |
permissive
|
rlworkgroup/gym-sawyer
|
05f2a28a8c3e1a3031c6539db0f6b503e771d07b
|
90d706cb0594c27045162bc9a00d56389f17615f
|
refs/heads/master
| 2020-04-01T19:12:37.672577
| 2019-12-11T19:56:43
| 2019-12-11T19:56:43
| 153,541,945
| 37
| 10
|
MIT
| 2019-12-11T19:56:45
| 2018-10-18T00:45:57
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
from sawyer.mujoco.tasks.reacher_tasks import ReachTask
from sawyer.mujoco.tasks.pick_and_place_tasks import PickTask, PlaceTask
from sawyer.mujoco.tasks.toy_tasks import (InsertTask, RemoveTask, OpenTask,
CloseTask)
__all__ = [
"ReachTask", "PickTask", "PlaceTask", "InsertTask", "RemoveTask",
"OpenTask", "CloseTask"
]
|
[
"noreply@github.com"
] |
rlworkgroup.noreply@github.com
|
9530f6b796f2fe2f1a57edbf76ec8b76d220b4fa
|
ab68d9fd15daf0460e92a471a417b188d4594b8f
|
/key.py
|
729f60405a7dfe20726759a3e31bcbf83bfe09f0
|
[] |
no_license
|
baifengbai/Console
|
63a911c850eb3c6c64e8381a14ae34e18fc4a95e
|
d40cb568d9dd1268379616e5d351073e303abfaa
|
refs/heads/master
| 2020-03-20T15:55:40.782551
| 2018-06-15T18:43:09
| 2018-06-15T18:43:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
# adopted from TerminalView
_KEY_MAP = {
"enter": "\r",
"backspace": "\x7f",
"tab": "\t",
"space": " ",
"escape": "\x1b",
"down": "\x1b[B",
"up": "\x1b[A",
"right": "\x1b[C",
"left": "\x1b[D",
"home": "\x1b[1~",
"end": "\x1b[4~",
"pageup": "\x1b[5~",
"pagedown": "\x1b[6~",
"delete": "\x1b[3~",
"insert": "\x1b[2~",
"f1": "\x1bOP",
"f2": "\x1bOQ",
"f3": "\x1bOR",
"f4": "\x1bOS",
"f5": "\x1b[15~",
"f6": "\x1b[17~",
"f7": "\x1b[18~",
"f8": "\x1b[19~",
"f9": "\x1b[20~",
"f10": "\x1b[21~",
"f12": "\x1b[24~",
"bracketed_paste_mode_start": "\x1b[200~",
"bracketed_paste_mode_end": "\x1b[201~",
}
# _APP_KEY_MAP = {
# "down": "\x1bOB",
# "up": "\x1bOA",
# "right": "\x1bOC",
# "left": "\x1bOD",
# }
_CTRL_KEY_MAP = {
"up": "\x1b[1;5A",
"down": "\x1b[1;5B",
"right": "\x1b[1;5C",
"left": "\x1b[1;5D",
"@": "\x00",
"`": "\x00",
"[": "\x1b",
"{": "\x1b",
"\\": "\x1c",
"|": "\x1c",
"]": "\x1d",
"}": "\x1d",
"^": "\x1e",
"~": "\x1e",
"_": "\x1f",
"?": "\x7f",
}
_ALT_KEY_MAP = {
"up": "\x1b[1;3A",
"down": "\x1b[1;3B",
"right": "\x1b[1;3C",
"left": "\x1b[1;3D",
}
def _get_ctrl_combination_key_code(key):
    """Return the control sequence for Ctrl+<key>.

    Order of precedence: explicit _CTRL_KEY_MAP entry, then the classic
    Ctrl-letter mapping (a..z -> 0x01..0x1a), then the plain key code.
    """
    key = key.lower()
    if key in _CTRL_KEY_MAP:
        return _CTRL_KEY_MAP[key]
    if len(key) == 1:
        code_point = ord(key)
        if ord('a') <= code_point <= ord('z'):
            # Ctrl+letter is the letter's alphabet index (1-based).
            return chr(code_point - ord('a') + 1)
    return _get_key_code(key)
def _get_alt_combination_key_code(key):
    """Return the escape sequence for Alt+<key>."""
    key = key.lower()
    try:
        return _ALT_KEY_MAP[key]
    except KeyError:
        # Generic Alt handling: ESC prefix followed by the plain key code.
        return "\x1b" + _get_key_code(key)
# def _get_app_key_code(key):
# if key in _APP_KEY_MAP:
# return _APP_KEY_MAP[key]
# return _get_key_code(key)
def _get_key_code(key):
    """Map a key name to its terminal input sequence; unknown keys are
    sent through unchanged."""
    return _KEY_MAP.get(key, key)
def get_key_code(key, ctrl=False, alt=False, shift=False):
    """
    Translate a key press (with modifiers) into the byte sequence to send
    to the shell. Ctrl takes precedence over Alt; `shift` is accepted but
    currently unused, matching the original behaviour.
    """
    if ctrl:
        return _get_ctrl_combination_key_code(key)
    if alt:
        return _get_alt_combination_key_code(key)
    return _get_key_code(key)
|
[
"randy.cs.lai@gmail.com"
] |
randy.cs.lai@gmail.com
|
079530cbf8ae21cb0804fcd789d21375b6add52c
|
cae8adc520ee71ffd9cfc82418152b4ec63f9302
|
/static_server/template/merge_sort.py
|
380683eb6d36d3135c3d8ad67c8363ce46d08626
|
[] |
no_license
|
dong-c-git/WSGIServer
|
55111c04f4bbefe239949ddaea16c71221b7f795
|
1f0b58977e2a951f3c6dec335854dd9d6e31cdfd
|
refs/heads/master
| 2020-08-01T17:03:30.307962
| 2019-11-09T01:45:30
| 2019-11-09T01:45:30
| 211,054,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
#coding:utf-8
def merge_sort(alist):
    """Merge sort.

    Returns a new sorted list; the input is left untouched (lists of
    length <= 1 are returned as-is). Stable, because ties (<=) take the
    element from the left half first. O(n log n) in all cases.
    """
    if len(alist) <= 1:
        return alist
    middle = len(alist) // 2
    # Recursively sort each half of the list.
    left = merge_sort(alist[:middle])
    right = merge_sort(alist[middle:])
    # Merge the two sorted halves into one sorted result.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One of the halves is exhausted; the other's tail is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
if __name__ == '__main__':
    # Demo: merge_sort returns a new sorted list and leaves the input
    # unchanged (hence li prints identically before and after).
    li = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    print(li)
    sorted_li = merge_sort(li)
    print(li)
    print(sorted_li)
'''时间复杂度分析:
最优时间复杂度:O(nlogn)
最坏时间复杂度:O(nlogn)
稳定性:稳定
'''
|
[
"dc111000@hotmail.com"
] |
dc111000@hotmail.com
|
61023febae45328ee0de5fd4f4e2f7f2e10987be
|
1347434410c173c0eed165acabfda66e3bbf735e
|
/bruhat/interp.py
|
842ad172afe9367baaa8a1088680ba92f7786790
|
[
"MIT"
] |
permissive
|
punkdit/bruhat
|
f6a857e1c7a289440f293f86ca4be9c781243347
|
6c9f94ee725843550459ac04ee9351700f90fcf1
|
refs/heads/master
| 2023-08-30T22:31:44.915358
| 2023-08-29T11:58:21
| 2023-08-29T11:58:21
| 86,669,985
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,973
|
py
|
#!/usr/bin/env python3
from operator import mul
from functools import reduce
import numpy
from bruhat.poly import Poly, Q
from bruhat.util import cross, all_subsets, factorial
from bruhat.argv import argv
from bruhat.elim import solve, shortstr
ring = Q
a = Poly("a", ring)
def interp(vals):
    """Univariate interpolation over the rationals (Newton forward
    differences): return a Poly p in the symbol `a` such that
    p(a=i) == vals[i] for every i in range(len(vals))."""
    n = len(vals)
    p = Poly({}, ring)  # start from the zero polynomial
    one = Poly({():ring.one}, ring)
    for i in range(n):
        #print(i, vals[i])
        y0 = p(a=i)  # current value of the partial interpolant at node i
        # q = a*(a-1)*...*(a-(i-1)) vanishes at all earlier nodes, so the
        # correction below cannot disturb points already matched.
        q = one
        for j in range(i):
            q = q*(a-j)
        #print("q:", q)
        for j in range(i):
            assert q(a=j) == 0
        #print("q(%d)=%s"%(i, q(a=i)))
        # q(a=i) is the product i*(i-1)*...*1 = i!, hence the 1/i! factor.
        r = ring.one / factorial(i)
        #print("r =", r)
        if y0 != vals[i]:
            p = p + (vals[i] - y0)*r*q
        #print("p(%d)=%s"%(i, p(a=i)))
        #print()
    return p
def multi_interp(target):
    """Multivariate interpolation: return a polynomial in one variable per
    axis of `target` (named 'a','b',... — at most 5 axes) whose value at
    each integer grid point equals the corresponding entry of `target`.

    Solves a dense Vandermonde-style linear system over the ring, so it
    is only practical for small shapes.
    """
    shape = target.shape
    n = len(shape)
    #print("multi_interp", shape)
    vs = 'abcde'[:n]
    ms = [Poly(v, ring) for v in vs]
    #print(ms)
    # All integer grid points of the target array, as variable->value dicts.
    itemss = [list(range(i)) for i in shape]
    coords = []
    for idxs in cross(itemss):
        namespace = dict((vs[i], idxs[i]) for i in range(n))
        #print(namespace)
        coords.append(namespace)
    # Monomial basis: one product of variable powers per grid index; row k
    # of A holds every basis monomial evaluated at grid point k.
    A = []
    polys = []
    for idxs in cross(itemss):
        p = reduce(mul, [p**i for (p,i) in zip(ms, idxs)])
        polys.append(p)
        #print(idxs, p)
        row = []
        for coord in coords:
            v = p.substitute(coord)
            row.append(ring.promote(v))
        A.append(row)
    A = numpy.array(A, dtype=object)
    A = A.transpose()
    #print(A.shape)
    #print(shortstr(A))
    rhs = target.view()
    rhs.shape = (len(A),1)
    #print(rhs)
    print("solve...")
    v = solve(ring, A, rhs)
    assert v is not None
    print(shortstr(v))
    # The solution vector gives the coefficient of each basis monomial.
    q = ring.zero
    for i, p in enumerate(polys):
        q = q + v[i, 0]*p
    return q
def multi_factorize(p, N=10, denom=2):
    """Heuristically strip linear factors from the multivariate polynomial
    p by scanning candidate roots -x/denom (x = 0..N-1) on every subset of
    its variables; returns the factors found plus any non-trivial remainder.
    """
    vs = p.get_vars()
    #print("multi_factorize", vs)
    ring = p.ring
    d = p.degree
    factors = []
    # Non-empty variable subsets, smallest first.
    idxss = list(all_subsets(len(vs)))
    idxss.sort(key = len)
    assert idxss[0] == []
    idxss.pop(0)
    for idxs in idxss:
        subvs = [vs[idx] for idx in idxs]
        print("subvs:", subvs)
        coords = [[-ring.promote(x)/denom for x in range(N)] for v in subvs]
        for ii in cross(coords):
            kw = dict((subvs[i], ii[i]) for i in range(len(subvs)))
            y = p(**kw)
            if y!=0:
                continue
            # p vanishes at this assignment: build the linear candidate
            # factor sum_k (var_k - value_k) and divide it out repeatedly.
            q = ring.zero
            for k,v in kw.items():
                #print("\t", k, v)
                q += Poly(k, ring) - v
            while 1:
                print("factor:", q)
                div, rem = q.reduce(p)
                if rem != 0:
                    break
                factors.append(q)
                p = div
                print("\t", p)
                if p.degree == 1:
                    break
    if p != 1:
        factors.append(p)
    return factors
def factorize(p):
    """Strip linear factors (a + i) from p, scanning candidate roots -i for
    i in {0, 1/6, 2/6, ..., 119/6}; returns the linear factors found plus
    any non-trivial remaining cofactor."""
    ring = p.ring
    d = p.degree
    factors = []
    for i in range(6*20):
        i = ring.one*i/6  # candidate root -i on a grid of sixths
        y = p(a=-i)
        if y!=0:
            continue
        # -i is a root: divide out (a + i) as many times as it divides p.
        while 1:
            f = (a+i)
            div, rem = f.reduce(p)
            if rem != 0:
                break
            factors.append(a+i)
            p = div
    if p != 1:
        factors.append(p)
    return factors
if argv.vals is not None:
vals = argv.get("vals", [1, 4, 10, 20, 35, 56])
p = interp(vals)
print("p =", p)
print("degree =", p.degree)
print("factors:", factorize(p))
#print(["%s"%p(a=i) for i in range(n)])
#print([(a-i).reduce(p)[1] for i in range(n)])
elif 1:
if 0:
# B2
vals = numpy.array(
[[1, 10, 35, 84, 165],
[5, 35, 105, 231, 429],
[14, 81, 220, 455, 810],
[30, 154, 390, 770, 1326],
[55, 260, 625, 1190, 1995]])
name = argv.next() or "A2"
N = int(argv.next() or 4)
import os
data = os.popen("./sl.sage %s %s"%(name, N)).read()
vals = eval(data)
vals = numpy.array(vals)
#vals = vals[:,0,:,0]
vals = vals.copy()
print(vals)
p = multi_interp(vals)
print("degree:", p.degree)
print(p)
#factors = multi_factorize(p)
#print(factors)
elif 0:
f = lambda a, b, c : (a+1)*(b+1)*(c+1)*(a+b+2)*(b+c+2)*(a+b+c+3)//12
N = 5
for c in range(3):
for b in range(N):
for a in range(N):
print("%6s"%f(a, b, c), end=" ")
print()
print()
elif 0:
f = lambda a, b, c, d : (
(a+1)*(b+1)*(c+1)*(d+1)*
(a+b+2)*(b+c+2)*(c+d+2)*
(a+b+c+3)*(b+c+d+3)*
(a+b+c+d+4)
//288)
N = 5
for d in range(3):
for c in range(3):
for b in range(N):
for a in range(N):
print("%6s"%f(a, b, c, d), end=" ")
print()
print()
print()
|
[
"simon@arrowtheory.com"
] |
simon@arrowtheory.com
|
c95ae527b44134a8cadd967e79f37488605ec84d
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/c09595ef061b70e4df39ae8cf5cc8b1905550faec62d2ffdd3e03598833e50ce/PyQt5/QtQuick/QSGMaterial.py
|
93e72583b02cd1bd2751c8b8edd627440be97d51
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
# encoding: utf-8
# module PyQt5.QtQuick
# from C:\Users\Doly\Anaconda3\lib\site-packages\PyQt5\QtQuick.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import PyQt5.QtQml as __PyQt5_QtQml
import sip as __sip
class QSGMaterial(__sip.wrapper):
    """ QSGMaterial() """
    # NOTE: machine-generated IDE stub ("by generator 1.147", see file
    # header); bodies are placeholders and do not reflect the real PyQt5
    # implementation — do not call these at runtime.
    def compare(self, QSGMaterial): # real signature unknown; restored from __doc__
        """ compare(self, QSGMaterial) -> int """
        return 0
    def createShader(self): # real signature unknown; restored from __doc__
        """ createShader(self) -> QSGMaterialShader """
        return QSGMaterialShader
    def flags(self): # real signature unknown; restored from __doc__
        """ flags(self) -> QSGMaterial.Flags """
        pass
    def setFlag(self, Union, QSGMaterial_Flags=None, QSGMaterial_Flag=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """ setFlag(self, Union[QSGMaterial.Flags, QSGMaterial.Flag], enabled: bool = True) """
        pass
    def type(self): # real signature unknown; restored from __doc__
        """ type(self) -> QSGMaterialType """
        return QSGMaterialType
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """list of weak references to the object (if defined)"""
    # Integer values of the QSGMaterial.Flag constants, as captured by the
    # stub generator.
    Blending = 1
    CustomCompileStep = 16
    RequiresDeterminant = 2
    RequiresFullMatrix = 14
    RequiresFullMatrixExceptTranslate = 6
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
fa7a3160b9d8970589e8b2ca45c4b350ba6993d8
|
73f09d06295ae4cf949b4b18a8dced861d3067f2
|
/cli/almond_cloud/cmd/k8s/tail.py
|
fc4cd3f4732b7178b8f4db39c1e47032b8bdac03
|
[
"Apache-2.0"
] |
permissive
|
tribe-health/almond-cloud
|
b7d8f6aeca83e047dfc28e5f84bb441f7b9646a7
|
95744da7aec789359093689704f4e2a989de1600
|
refs/heads/master
| 2023-06-28T02:39:32.162066
| 2022-12-20T03:24:54
| 2022-12-20T03:24:54
| 269,748,762
| 0
| 0
|
NOASSERTION
| 2020-06-05T19:12:45
| 2020-06-05T19:12:44
| null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
from typing import Iterable, List, NoReturn, Optional
from queue import Queue
from threading import Thread
import splatlog as logging
from kubernetes import client, config
from kubernetes.watch import Watch
from kubernetes.client.models.v1_pod import V1Pod
from clavier import arg_par, err, io
from almond_cloud.lib import targets
from almond_cloud.config import CONFIG
LOG = logging.getLogger(__name__)
DESC = f"""\
Follow logs of one or more pods.
"""
def add_parser(subparsers: arg_par.Subparsers):
parser = subparsers.add_parser(
"tail",
target=tail,
help=DESC.splitlines()[0],
description=DESC,
)
parser.add_argument(
"pod_names",
nargs="+",
help="Pods to follow, which are prefix-matched against the name",
)
parser.add_argument(
"-t",
"--target",
dest="target_name",
default="local",
help="Target name with the Thingpedia url and access-token to use",
)
parser.add_argument(
"-l",
"--lines",
dest="tail_lines",
default=42,
help="How many lines to print at start",
)
def match_pod_name(pod_names: Iterable[str], pod: V1Pod) -> bool:
for name in pod_names:
if pod.metadata.name == name or pod.metadata.name.startswith(
f"{name}-"
):
return True
return False
def tail_one(
api_v1: client.CoreV1Api, pod_name: str, namespace: str, tail_lines: int
) -> NoReturn:
watch = Watch()
color_name = io.capture(f"[dim white]{pod_name}[/]", end="")
for line in watch.stream(
api_v1.read_namespaced_pod_log,
pod_name,
namespace,
tail_lines=tail_lines,
):
print(f"{color_name} {line}")
def _thread_tail(
queue: Queue,
api_v1: client.CoreV1Api,
pod_name: str,
pad_width: int,
namespace: str,
) -> NoReturn:
watch = Watch()
padded_name = ("{:<" + str(pad_width) + "}").format(pod_name)
left_col = io.capture(f"[dim white]{padded_name}[/]", end="")
for line in watch.stream(
api_v1.read_namespaced_pod_log, pod_name, namespace, tail_lines=0
):
queue.put(left_col + line)
def tail_many(
api_v1: client.CoreV1Api, pod_names: List[str], namespace: str
) -> NoReturn:
max_name_length = max(len(n) for n in pod_names)
pad_width = (int(max_name_length / 4) + 1) * 4
queue = Queue()
threads = [
Thread(
target=_thread_tail,
args=(queue, api_v1, pod_name, pad_width, namespace),
)
for pod_name in pod_names
]
for thread in threads:
thread.setDaemon(True)
thread.start()
while True:
print(queue.get())
def tail(pod_names: List[str], target_name: str, tail_lines: int):
target = targets.get(target_name)
namespace = target["k8s.namespace"]
context = target.get("k8s.context")
LOG.info(
"Tailing pods...",
context=context,
namespace=namespace,
)
config.load_kube_config(context=context)
api_v1 = client.CoreV1Api()
all_pods = api_v1.list_namespaced_pod(namespace).items
pods = [pod for pod in all_pods if match_pod_name(pod_names, pod)]
if len(pods) == 0:
LOG.error(
"No pods found.",
pod_names=pod_names,
available_pods=sorted([pod.metadata.name for pod in all_pods]),
)
raise err.UserError("No pods found.")
if len(pods) == 1:
tail_one(api_v1, pods[0].metadata.name, namespace, tail_lines)
tail_many(api_v1, [pod.metadata.name for pod in pods], namespace)
|
[
"neil@neilsouza.com"
] |
neil@neilsouza.com
|
f327539d93e11b288578e502e51e08a094303cb7
|
10b4c22bdb4a1737028c730136d924e1665196c4
|
/src/TheLanguage/Grammars/v1_0_0/IntegrationTests/PassStatement_IntegrationTest.py
|
88edae76da4a931c98fbfec7c935bf7df1615856
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"Python-2.0"
] |
permissive
|
misharp/DavidBrownell_TheLanguage
|
54bd886aae3814cd691328aad9ee1e4f65c7c427
|
cae8cbef94b2054f80f6df06e945e70a13a0da69
|
refs/heads/master
| 2023-05-23T16:11:26.372177
| 2021-06-17T04:27:06
| 2021-06-17T04:27:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
# ----------------------------------------------------------------------
# |
# | PassStatement_IntegrationTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-06-15 17:09:30
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Automated test for PassStatement.py"""
import os
import textwrap
import CommonEnvironment
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from . import Execute
from ..CommentStatement import *
# ----------------------------------------------------------------------
def test_Standard():
    # Round-trip check: parsing a lone `pass` statement must produce exactly
    # this AST dump. The comparison is whitespace-sensitive, hence
    # textwrap.dedent on both the input program and the expected output.
    assert Execute(
        textwrap.dedent(
            """\
            pass
            """,
        ),
    ) == textwrap.dedent(
        """\
        <Root>
            1.0.0 Grammar
                Pass
                    'pass' <<Regex: <_sre.SRE_Match object; span=(0, 4), match='pass'>>> ws:None [1, 1 -> 1, 5]
        """,
    )
|
[
"db@DavidBrownell.com"
] |
db@DavidBrownell.com
|
c904e60f7ce81205000cd204b1bbed82b6936fa4
|
e953ae5da775a934b86379cfa3d864bb7376fe36
|
/06 basic_python/1.py
|
22e6ddfdbac0d85f102006479e6e997e28f9fad7
|
[] |
no_license
|
agyenes/greenfox-exercises
|
1481f17d1ddd78099d17022aa1800955ae39d92b
|
a2c7912c61708c6ebc53c9a22f8c09550432d4c3
|
refs/heads/master
| 2020-04-11T00:42:17.842170
| 2016-10-19T06:10:22
| 2016-10-19T06:10:22
| 68,081,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
# Create a `Circle` class that takes it's radius as cinstructor parameter
# It should have a `get_circumference` method that returns it's circumference
# It should have a `get_area` method that returns it's area
import math
class Circle():
    """A circle defined by its radius.

    Exposes its circumference (2*pi*r) and area (pi*r**2). The arithmetic
    is written in the exact same operand order as the original so float
    results are bit-identical.
    """
    def __init__(self, radius):
        # Stored as given; no validation is performed.
        self.radius = radius
    def get_circumference(self):
        """Length of the circle's boundary."""
        return self.radius * 2 * math.pi
    def get_area(self):
        """Surface area enclosed by the circle."""
        return self.radius ** 2 * math.pi
circle1 = Circle(5)
print(circle1.get_circumference())
print(circle1.get_area())
|
[
"aron.gyenes@gmail.com"
] |
aron.gyenes@gmail.com
|
75904dffa7c7e1533d4cdc92760c5409cdef2da5
|
f00699824a8c5def54421ee3cf836ec2cd15d957
|
/3/django_1703_day3/app01/views.py
|
594e5317ae6464d78852b58176e46267d82e60db
|
[] |
no_license
|
ZhiqiWu/student_manage
|
9171d78c32d6900b08de9034b9a2f50c9e24d0b8
|
da12ebaf4e9d6357cd9f832144ed756a55510433
|
refs/heads/master
| 2023-03-29T04:56:09.486061
| 2020-01-24T08:11:50
| 2020-01-24T08:11:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
#coding:utf8
from django.shortcuts import render,HttpResponse,render_to_response
import logging
from django.template import Template,Context
import datetime
# Create your views here.
#创建一个日志器
mylogger = logging.getLogger('app01')
class Human(object):
    """Demo object exposed to the template context (as 'h1') to show
    attribute and method access from Django templates."""
    name = '人'
    age = 10000
    def sayGoodBye(self):
        # The value returned here is what the template displays.
        return 'sayGoodBye-------------'
# def index(request):
# try:
# with open('wer','r') as f:
# f.read()
# except Exception,e:
# mylogger.error(str(e))
# return HttpResponse('app01 index page ok')
def index(request):
    # Render the app's landing page template.
    return render(request,'app01/index.html')
def zhale(request):
    # Emit a CRITICAL-level record through the 'app01' logger (the Chinese
    # string is a joke test message: "the bathhouse blew up"), then ack.
    mylogger.critical('洗澡城炸了')
    return HttpResponse('ok')
def tpl(request):
    """Demonstrates manual Template/Context rendering and the variety of
    context value types available to Django templates (strings, numbers,
    lists, dicts, objects, dates, None, raw HTML)."""
    # --------------------------------------------------------------------
    t = Template("<h1>My name is {{name}}.</h1>") # load an HTML snippet into a Template; returns a Template object
    context = {'name' : 'Alice'} # context dict: the variables to render into the template
    c = Context(context) # build a Context object from the context dict
    html = t.render(c) # render the template against the Context object
    # --------------------------------------------------------------------
    # render_to_response does not take the request object,
    # render does
    # return render_to_response('app01/tpl.html')
    # Context dictionary for the real template below.
    h1 = Human()
    context = {
        'name' : '小美',
        'engname' : 'XIAOMEI',
        'age' : 18,
        'sex' : '中',
        'score' : 100.99,
        #'subject' : ['python','php','java','hadoop','openstack','docker','c++'],
        'subject' : [],
        'info' : {'interest':'打游戏','money':0},
        'h1' : h1,
        'china' : [
            {'北京' : ['朝阳','海淀',u'三里屯','什刹海','中南海','天安门','changping']},
            {'黑龙江' : ['哈尔滨','牡丹江','齐齐哈尔','鸡西','日本','首尔','俄罗斯']},
        ],
        'range': range(1,11),
        'desc' : "了矿务局儿科就了哦字。, \n想,臭美吧厘米",
        'desc1' : "how are old you",
        'now' : datetime.datetime.now(),
        'suibian' : None,
        'link' : '<a href="http://www.baidu.com">点我</a>'
    }
    return render(request,'app01/tpl.html',context) # a1: request object, a2: template path, a3: context dict
|
[
"1049939190@qq.com"
] |
1049939190@qq.com
|
70b004e7a623bdaba5208a8f234471c520d7a44b
|
f157635f2e0d175bbbe4d0fdc615cd00e313ea80
|
/ClassificationAndRegression/GeneralizedLinearModels/ARDRegression.py
|
e3df0c5fd3cb189ff63d077914a1d3c28a75eeb3
|
[
"MIT"
] |
permissive
|
kopok2/MachineLearningAlgorithms
|
66a20884bb40fabd0351b33e37ed04ec69bf2691
|
9d5eb9c17a1354e726b79e9cfae9e5638976b919
|
refs/heads/master
| 2021-06-30T14:47:01.490219
| 2020-11-30T09:08:11
| 2020-11-30T09:08:11
| 196,454,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
# coding=utf-8
"""ARD Regression.
Automatic Relevance Determination.
"""
import numpy as np
from sklearn import linear_model
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
from scipy import stats
if __name__ == "__main__":
    print("Generating data...")
    n_samples, n_features = 2000, 50
    X = np.random.randn(n_samples, n_features)
    rel_features = 10
    lambda_ = 0.4
    w = np.zeros(n_features)
    # Only `rel_features` randomly chosen weights are non-zero, so ARD's
    # sparsity-inducing prior should be able to recover them.
    rel_f = np.random.randint(0, n_features, rel_features)
    for i in rel_f:
        w[i] = stats.norm.rvs(loc=0, scale=1.0 / np.sqrt(lambda_))
    alpha_ = 0.30
    noise = stats.norm.rvs(loc=0, scale=1.0 / np.sqrt(lambda_), size=n_samples)
    y = np.dot(X, w) + noise
    # Second half of the rows trains the model; first half evaluates it.
    X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
    y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
    print("Fitting model...")
    ard = linear_model.ARDRegression()
    ard.fit(X_train, y_train)
    print("R2 score: {0}".format(r2_score(y_test, ard.predict(X_test))))
    print("Plotting predictions...")
    # Train targets in red, test targets in fuchsia, model curve in purple.
    plt.scatter(np.arange(n_samples // 2), y_train, color="red")
    plt.scatter(np.arange(n_samples // 2) + n_samples // 2, y_test, color="fuchsia")
    plt.plot(np.arange(n_samples), ard.predict(X), color="purple")
    plt.show()
|
[
"oleszek.karol@gmail.com"
] |
oleszek.karol@gmail.com
|
eaddf9c02bba01b2e67dcfbd369737b7a59732e4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_103/ch57_2020_04_13_15_35_20_007820.py
|
c5500bc24504c86df8f638052c8b4955a9d7877c
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
def verifica_progressao(lista):
    """Classify *lista* as an arithmetic progression ('PA'), a geometric
    progression ('PG'), both ('AG'), or neither ('NA').

    Fixes over the original:
    - the loop indexed lista[i+1] with i running up to len(lista)-1,
      raising IndexError on the final step;
    - every branch returned on the first iteration, so only the first
      and last gaps were ever inspected;
    - dividing by a zero element raised ZeroDivisionError.
    """
    # Arithmetic: every consecutive difference equals the first one.
    diff = lista[1] - lista[0]
    is_pa = all(lista[i + 1] - lista[i] == diff
                for i in range(len(lista) - 1))
    # Geometric: a ratio only exists when no divisor term is zero.
    if any(x == 0 for x in lista[:-1]):
        is_pg = False
    else:
        ratio = lista[1] / lista[0]
        is_pg = all(lista[i + 1] / lista[i] == ratio
                    for i in range(len(lista) - 1))
    if is_pa and is_pg:
        return 'AG'
    if is_pa:
        return 'PA'
    if is_pg:
        return 'PG'
    return 'NA'
|
[
"you@example.com"
] |
you@example.com
|
11c200638fbf348a5ea5e1f13aec2570258ce52e
|
917aecb0568e70b2b0e3a6969076b0f28e48eca3
|
/archive/wiki_web_traffic_predict/LSTM_train_V1.py
|
a036778335788c1c296e1c898044070e1a1f3d2f
|
[] |
no_license
|
yennanliu/Kaggle.com_mini_project
|
d708af67172144ca2e4bac317e1dc44a59b99a95
|
9ca654692acf5c29c466c26cd101b10d1dd98a7c
|
refs/heads/master
| 2020-05-21T14:00:45.873899
| 2020-03-28T01:36:17
| 2020-03-28T01:36:17
| 50,233,788
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,673
|
py
|
# ops
import numpy as np
import pandas as pd
import datetime as dt
import time
import math
import re
# DL
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.recurrent import LSTM, GRU
from keras.layers import Convolution1D, MaxPooling1D
from keras.callbacks import Callback
def load_data():
    """Load the Kaggle web-traffic training set, filling missing counts with 0."""
    return pd.read_csv('train_1.csv').fillna(0)
# help functions
def get_language(page):
    """Extract the two-letter language code from a wiki page identifier.

    Page names embed a host like '..._en.wikipedia.org_...'; returns the
    language code ('en', 'ja', ...) or 'na' when no wikipedia host is
    present (e.g. commons / mediawiki pages).
    """
    # Bug fix: the original pattern '[a-z][a-z].wikipedia.org' used an
    # unescaped '.', so any character was accepted between the language
    # code and 'wikipedia' (e.g. 'enXwikipedia.org' matched).
    res = re.search(r'([a-z][a-z])\.wikipedia\.org', page)
    if res:
        return res.group(1)
    return 'na'
def get_aggregated_data(train):
    """Average daily page views per language.

    Splits *train* on its 'lang' column into the eight fixed language
    groups, then for each group averages the daily count columns over
    all of that group's rows. Returns {lang_code: Series of averages}.
    """
    languages = ('en', 'ja', 'de', 'na', 'fr', 'zh', 'ru', 'es')
    # Per-language frames with the trailing 'lang' column dropped.
    lang_sets = {code: train[train.lang == code].iloc[:, 0:-1]
                 for code in languages}
    sums = {}
    for code, frame in lang_sets.items():
        # Skip the leading Page column, sum each day over rows, then
        # divide by the row count to get the per-page daily average.
        sums[code] = frame.iloc[:, 1:].sum(axis=0) / frame.shape[0]
    print(sums)
    return sums
def create_dataset(dataset, look_back=1):
    """Build supervised (X, y) pairs from a 2-D time series.

    Each sample is `look_back` consecutive values taken from column 0 of
    *dataset*, and its target is the value immediately after the window.
    Returns two numpy arrays (samples, targets).
    """
    samples, targets = [], []
    last_start = len(dataset) - look_back - 1
    for start in range(last_start):
        window = dataset[start:(start + look_back), 0]
        samples.append(window)
        targets.append(dataset[start + look_back, 0])
    return np.array(samples), np.array(targets)
def single_input_LSTM(sums,language):
    """Train and evaluate a 1-step-ahead LSTM on one language's traffic series.

    Scales the series to [0, 1], splits it 67/33 into train/test,
    builds look-back-1 supervised pairs, fits a small Keras LSTM, and
    prints train/test RMSE in the original (unscaled) units.

    sums     -- dict of per-language average-daily-view series
                (output of get_aggregated_data)
    language -- key into sums selecting which series to model
    """
    #for language in sums.keys():
    #for language in ['fr']:
    # LSTMs are sensitive to input scale; normalize to [0, 1] first.
    scaler = MinMaxScaler(feature_range=(0, 1))
    # NOTE(review): Series.reshape was removed in modern pandas; this
    # presumably ran on an old version -- confirm or use .values.reshape.
    dataset = scaler.fit_transform(sums[language].reshape(-1, 1))
    print ('language : ', language)
    # split into train and test sets
    train_size = int(len(dataset) * 0.67)
    test_size = len(dataset) - train_size
    print ('-------')
    print ('train_size : ', train_size)
    print ('test_size : ', test_size)
    print ('-------')
    train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
    # reshape into X=t and Y=t+1
    look_back = 1
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    # reshape input to be [samples, time steps, features]
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
    # create and fit the LSTM network: 4 LSTM units -> 1 linear output.
    model = Sequential()
    model.add(LSTM(4, input_shape=(1, look_back)))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(trainX, trainY, epochs=20, batch_size=1, verbose=2)
    # make predictions
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
    # invert predictions back to the original view-count scale so RMSE
    # is reported in meaningful units.
    trainPredict = scaler.inverse_transform(trainPredict)
    trainY = scaler.inverse_transform([trainY])
    testPredict = scaler.inverse_transform(testPredict)
    testY = scaler.inverse_transform([testY])
    # calculate root mean squared error
    trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
    print('Train Score: %.2f RMSE' % (trainScore))
    testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
    print('Test Score: %.2f RMSE' % (testScore))
    # shift train predictions for plotting (align with the time axis).
    trainPredictPlot = np.empty_like(dataset)
    trainPredictPlot[:, :] = np.nan
    trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
    # shift test predictions for plotting
    testPredictPlot = np.empty_like(dataset)
    testPredictPlot[:, :] = np.nan
    testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
    #plot (left disabled; kept for reference)
    """
    series,=plt.plot(scaler.inverse_transform(dataset)[:,])
    prediccion_entrenamiento,=plt.plot(trainPredictPlot[:,],color = 'red')
    prediccion_test,=plt.plot(testPredictPlot[:,],color = 'blue')
    plt.title('Web View Forecasting (LSTM, lookback=1)')
    plt.xlabel('Number of Days from Start')
    plt.ylabel('Web View')
    plt.legend()
    plt.legend([serie,prediccion_entrenamiento,prediccion_test],['all data','train','test'], loc='upper right')
    plt.show()
    """
if __name__ == '__main__':
    # Pipeline: load the raw Kaggle file, tag each page with its wiki
    # language, aggregate per-language daily averages, then train and
    # evaluate the LSTM on the Japanese series only.
    train = load_data()
    train['lang'] = train.Page.map(get_language)
    print (train.head(3))
    sums = get_aggregated_data(train)
    single_input_LSTM(sums,'ja')
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
f59cf85c3391d05a7cd3c647b118d27d9acc8f44
|
564d6a4d305a8ac6a7e01c761831fb2081c02d0f
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/aio/operations/_virtual_machine_run_commands_operations.py
|
0abb8cec5a340f97b7a6b41b4d141a7d9fcc1479
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
paultaiton/azure-sdk-for-python
|
69af4d889bac8012b38f5b7e8108707be679b472
|
d435a1a25fd6097454b7fdfbbdefd53e05029160
|
refs/heads/master
| 2023-01-30T16:15:10.647335
| 2020-11-14T01:09:50
| 2020-11-14T01:09:50
| 283,343,691
| 0
| 0
|
MIT
| 2020-07-28T22:43:43
| 2020-07-28T22:43:43
| null |
UTF-8
|
Python
| false
| false
| 7,966
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
# Generic payload type and the signature of the optional custom response
# callback ('cls' keyword) accepted by every operation in this module.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineRunCommandsOperations:
    """VirtualMachineRunCommandsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    NOTE: this file is auto-generated by AutoRest (see the file header);
    hand edits will be lost on regeneration.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2019_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        location: str,
        **kwargs
    ) -> AsyncIterable["models.RunCommandListResult"]:
        """Lists all available run commands for a subscription in a location.

        :param location: The location upon which run commands is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either RunCommandListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_12_01.models.RunCommandListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RunCommandListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # First page: format the templated URL; later pages reuse the
            # server-supplied next_link verbatim (no query parameters).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (link to next page, items).
            deserialized = self._deserialize('RunCommandListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, mapping HTTP error statuses to typed exceptions.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands'}  # type: ignore

    async def get(
        self,
        location: str,
        command_id: str,
        **kwargs
    ) -> "models.RunCommandDocument":
        """Gets specific run command for a subscription in a location.

        :param location: The location upon which run commands is queried.
        :type location: str
        :param command_id: The command id.
        :type command_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RunCommandDocument, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_12_01.models.RunCommandDocument
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.RunCommandDocument"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-12-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
            'commandId': self._serialize.url("command_id", command_id, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('RunCommandDocument', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}'}  # type: ignore
|
[
"noreply@github.com"
] |
paultaiton.noreply@github.com
|
c0ae26be6c3e7f00a90660c2254e971d71819294
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02997/s890368623.py
|
91aaff9a40a4f404716381112f1c21a775c6e166
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
# Build a connected graph on N vertices with exactly K pairs of vertices
# at distance 2 impossible... (competitive-programming output format:
# edge count M, then M edges). Vertex 1 is connected to everyone (star),
# then all but K of the possible edges among {2..N} are added.
N, K = map(int, input().split())
# Maximum number of extra edges among vertices 2..N. Bug fix: the
# original used int((N-1)*(N-2)/2); float division loses precision for
# large N, so use exact integer arithmetic.
fil = (N - 1) * (N - 2) // 2
if K > fil:
    print(-1)
else:
    M = N - 1 + fil - K
    print(M)
    # Star edges from vertex 1 guarantee connectivity.
    for i in range(N - 1):
        print(1, i + 2)
    # Enumerate candidate edges among {2..N} in the same (j, k) order as
    # the original, and emit all but K of them. (Also: no longer shadows
    # the builtin `list`, and the unused `count` variable is gone.)
    pairs = [(j, k) for j in range(2, N) for k in range(j + 1, N + 1)]
    for j, k in pairs[:fil - K]:
        print(j, k)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4920502b9c8a0d5803967e8e6e59f571e503e028
|
ba78d67c366d213c54d7fd88cef0d0bc1d40b1cd
|
/51.py
|
1307af3e0e3d2a0b1e38eff320796cbaf4ad662c
|
[
"MIT"
] |
permissive
|
thaisNY/GuanabaraPy
|
4de00dce606a729fe18936481d77b18efd5f6859
|
a0a3acbd9242a39491a365b07562037d7a936bba
|
refs/heads/main
| 2023-08-23T17:00:28.479117
| 2021-11-05T03:29:17
| 2021-11-05T03:29:17
| 424,429,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# Read the first term and the common difference (razão) of an arithmetic
# progression (PA) and print its first 10 terms.
a1 = int(input('Digite o primeiro termo da Pa'))
r = int(input('Digite a razão da Pa'))
# Bug fix: the original looped over range(a1, 11), so the number of
# printed terms depended on the value of the first term (e.g. a1=5
# printed only 6 terms) instead of always printing 10.
for cont in range(10):
    print(a1 + cont * r)
|
[
"71556315+thaisNY@users.noreply.github.com"
] |
71556315+thaisNY@users.noreply.github.com
|
f06779f7e8828690d06957ef4314a50c7af1296d
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/3215bc98fe934f21f37fc5cc38ae5f123f444140-<get_srid_info>-fix.py
|
61033a70af5b915d436643ecaa2d98431d9a883a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
def get_srid_info(srid, connection):
    """
    Return the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection. Results are cached in the
    module-level `_srid_cache`, keyed by database alias then SRID.
    """
    # Imported lazily so GDAL is only loaded when spatial data is used.
    from django.contrib.gis.gdal import SpatialReference
    # NOTE(review): assumes _srid_cache maps alias -> dict and is defined
    # elsewhere in this module (e.g. a defaultdict) -- confirm.
    global _srid_cache
    try:
        # Backend-specific model for the spatial_ref_sys table; backends
        # without one raise NotImplementedError.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        SpatialRefSys = None
    # With a spatial_ref_sys model, look the SRS up in this connection's
    # database (cached under its alias); otherwise build a
    # SpatialReference directly from the SRID (cached under alias None).
    (alias, get_srs) = ((connection.alias, (lambda srid: SpatialRefSys.objects.using(connection.alias).get(srid=srid).srs)) if SpatialRefSys else (None, SpatialReference))
    if (srid not in _srid_cache[alias]):
        srs = get_srs(srid)
        (units, units_name) = srs.units
        sphere_name = srs['spheroid']
        # WKT SPHEROID clause: name, semi-major axis, inverse flattening.
        spheroid = ('SPHEROID["%s",%s,%s]' % (sphere_name, srs.semi_major, srs.inverse_flattening))
        _srid_cache[alias][srid] = (units, units_name, spheroid)
    return _srid_cache[alias][srid]
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
eb5ec60604bedf1e414bdc0f40d2044b46690a1e
|
ab32e6384b7c679a327a4bf1df6dd24c058b78a5
|
/core/site_utils.py
|
449603b8ffa124f07af3cb5eb922b8628204b060
|
[] |
no_license
|
webmaxdev0110/digi-django
|
ad2497791d6d3b6aa74eb697dd7eef324ebb5846
|
4cd52c07bb64e9d9381a957323d277489a02181a
|
refs/heads/master
| 2020-03-23T13:37:12.600565
| 2017-07-10T10:23:15
| 2017-07-10T10:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
from django.contrib.sites.models import Site
from django.http.request import split_domain_port
try:
# python 2
from urlparse import urlparse
except ImportError:
# Python 3
from urllib.parse import urlparse
def get_site_from_request_origin(request):
    """Resolve the Django Site whose domain matches the request's Origin header.

    Returns None when the header is absent or no Site row has that domain.
    """
    origin = request.META.get('HTTP_ORIGIN', None)
    if not origin:
        return None
    # Test clients may send a schemaless origin; prefix '//' so urlparse
    # still interprets the value as a network location.
    if not origin.startswith('http'):
        origin = '//' + origin
    domain, _port = split_domain_port(urlparse(origin).netloc)
    try:
        return Site.objects.get(domain=domain)
    except Site.DoesNotExist:
        return None
|
[
"webmax0110.dev@gmail.com"
] |
webmax0110.dev@gmail.com
|
4dc2667a569aca4ad5058cc12a464ca8ebe81cdc
|
a81984a197944169935f005f95a0e69e8c64143b
|
/artifacts/default/Ubuntu_16/get-sdk-2019.01.01-Ubuntu_16.py
|
02eac38bda03b95afbf5e893ac65df8df933f711
|
[] |
no_license
|
pulp-platform/pulp-sdk-release
|
d6531bfb2f55335d02103a63fc5af90877333af3
|
a3ad33b4bd5bcf704580857b9a1adcba3ed2a7ff
|
refs/heads/master
| 2021-06-05T02:10:59.317545
| 2020-01-09T09:12:05
| 2020-01-09T09:12:05
| 132,143,493
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,268
|
py
|
#!/usr/bin/env python3
# This file has been auto-generated and can be used for downloading the SDK it has
# been generated for.
import os
import tarfile
import os.path
import argparse
# Pinned git revision of the pulp_pipeline sources matching this SDK release.
src="59b44701b6ac8390a97936cbd049256fc2917212"

# (artifact URL, install sub-directory) pairs fetched by 'get'/'download'.
artefacts=[
    ["https://iis-artifactory.ee.ethz.ch/artifactory/release/Ubuntu_16/pulp/sdk/mainstream/e5a5beca5677e4ea6fb3db81099a0375d57b5d02/0/sdk.tar.bz2", "pkg/sdk/2019.01.01"],
    ["https://iis-artifactory.ee.ethz.ch/artifactory/release/Ubuntu_16/pulp/pulp_riscv_gcc/mainstream/1.0.9/0/pulp_riscv_gcc.tar.bz2", "pkg/pulp_riscv_gcc/1.0.9"]
]

# Environment variables written into the generated env/env-*.sh script;
# $PULP_PROJECT_HOME is substituted with the install directory.
exports=[
    ["PULP_SDK_HOME", "$PULP_PROJECT_HOME/pkg/sdk/2019.01.01"],
    ["PULP_SDK_INSTALL", "$PULP_SDK_HOME/install"],
    ["PULP_SDK_WS_INSTALL", "$PULP_SDK_HOME/install/ws"],
    ["PULP_RISCV_GCC_TOOLCHAIN_CI", "$PULP_PROJECT_HOME/pkg/pulp_riscv_gcc/1.0.9"],
    ["PULP_RISCV_GCC_VERSION", "3"]
]

# Setup scripts sourced from the generated env script ([sh, csh] pair).
sourceme=[
    ["$PULP_SDK_HOME/env/setup.sh", "$PULP_SDK_HOME/env/setup.csh"]
]

# Package name and version of this release (used for env file naming).
pkg=["sdk", "2019.01.01"]
# Command-line driver. Supported commands: 'get' (download + extract +
# write env script, the default), 'download' (fetch archives only),
# 'env' (regenerate the env script), 'src' (check out matching sources).
parser = argparse.ArgumentParser(description='PULP downloader')

parser.add_argument('command', metavar='CMD', type=str, nargs='*',
                    help='a command to be execute')

parser.add_argument("--path", dest="path", default=None, help="Specify path where to install packages and sources")

args = parser.parse_args()

if len(args.command ) == 0:
    args.command = ['get']

# Optional install root: create it if needed and work from there.
if args.path != None:
    path = os.path.expanduser(args.path)
    if not os.path.exists(path):
        os.makedirs(path)
    os.chdir(path)

for command in args.command:

    if command == 'get' or command == 'download':

        dir = os.getcwd()

        if command == 'get':
            if not os.path.exists('pkg'): os.makedirs('pkg')
            os.chdir('pkg')

        for artefactDesc in artefacts:
            artefact = artefactDesc[0]
            path = os.path.join(dir, artefactDesc[1])
            urlList = artefact.split('/')
            fileName = urlList[len(urlList)-1]

            # Skip artifacts already extracted unless we're only downloading.
            if command == 'download' or not os.path.exists(path):
                if os.path.exists(fileName):
                    os.remove(fileName)
                if os.system('wget --no-check-certificate %s' % (artefact)) != 0:
                    exit(-1)
                if command == 'get':
                    # Extract into the versioned install directory, then
                    # drop the downloaded tarball.
                    os.makedirs(path)
                    t = tarfile.open(os.path.basename(artefact), 'r')
                    t.extractall(path)
                    os.remove(os.path.basename(artefact))

        os.chdir(dir)

    if command == 'get' or command == 'download' or command == 'env':
        if not os.path.exists('env'):
            os.makedirs('env')
        # Write a sourceable sh script exporting the SDK environment.
        filePath = 'env/env-%s-%s.sh' % (pkg[0], pkg[1])
        with open(filePath, 'w') as envFile:
            #envFile.write('export PULP_ENV_FILE_PATH=%s\n' % os.path.join(os.getcwd(), filePath))
            #envFile.write('export PULP_SDK_SRC_PATH=%s\n' % os.environ.get("PULP_SDK_SRC_PATH"))
            envFile.write('export %s=%s\n' % ('PULP_PROJECT_HOME', os.getcwd()))
            for export in exports:
                envFile.write('export %s=%s\n' % (export[0], export[1].replace('$PULP_PROJECT_HOME', os.getcwd())))
            for env in sourceme:
                envFile.write('source %s\n' % (env[0].replace('$PULP_PROJECT_HOME', os.getcwd())))
            #envFile.write('if [ -e "$PULP_SDK_SRC_PATH/init.sh" ]; then source $PULP_SDK_SRC_PATH/init.sh; fi')

        #filePath = 'env/env-%s-%s.csh' % (pkg[0], pkg[1])
        #with open(filePath, 'w') as envFile:
        #    envFile.write('setenv PULP_ENV_FILE_PATH %s\n' % os.path.join(os.getcwd(), filePath))
        #    envFile.write('setenv PULP_SDK_SRC_PATH %s\n' % os.environ.get("PULP_SDK_SRC_PATH"))
        #    for env in envFileStrCsh:
        #        envFile.write('%s\n' % (env.replace('@PULP_PKG_HOME@', os.getcwd())))
        #    envFile.write('if ( -e "$PULP_SDK_SRC_PATH/init.sh" ) then source $PULP_SDK_SRC_PATH/init.sh; endif')

    if command == 'src':
        # Check out the pinned pipeline revision, initializing the repo
        # and remote on first use.
        if os.path.exists('.git'):
            os.system('git checkout %s' % (src))
        else:
            os.system('git init .')
            os.system('git remote add -t \* -f origin git@kesch.ee.ethz.ch:pulp-sw/pulp_pipeline.git')
            os.system('git checkout %s' % (src))
|
[
"germain.haugou@iis.ee.ethz.ch"
] |
germain.haugou@iis.ee.ethz.ch
|
bc38d2efb5600eba0aec10d2e7009307896556f1
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/Cura/plugins/GCodeReader/MarlinFlavorParser.py
|
482285a2c9508de5ae79665113c0b1fd84a5663f
|
[
"LGPL-3.0-only",
"GPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058
| 2020-05-30T07:52:58
| 2020-05-30T07:52:58
| 212,583,912
| 0
| 0
|
MIT
| 2020-05-16T07:39:47
| 2019-10-03T13:13:01
|
C
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
# Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from . import FlavorParser
# Parser for G-code produced with the Marlin/Sprinter firmware flavor.
class MarlinFlavorParser(FlavorParser.FlavorParser):
    """FlavorParser specialization for the Marlin/Sprinter G-code flavor.

    Currently inherits all parsing behavior unchanged from FlavorParser.
    """
    def __init__(self):
        super().__init__()
|
[
"t106360212@ntut.org.tw"
] |
t106360212@ntut.org.tw
|
60e0c37b3231d5f3a8af1f4f81ace45335df8286
|
457b687219cb723585164e84417ed3bacc8c234d
|
/qianfeng_400/爬虫/网络编程/1,TCP客户端.py
|
2d367d1a423db6b3b0d86786545c5eebc10f4263
|
[] |
no_license
|
jxy147258/qianfeng_python
|
ffdc77fb05cfb2302af51fc3047efa0eadeb2064
|
0f6c06fdf19a47f7b5083cde4e1eb2011442c5f7
|
refs/heads/master
| 2021-07-16T15:19:45.759356
| 2021-02-03T16:04:14
| 2021-02-03T16:04:14
| 237,947,869
| 2
| 2
| null | 2020-02-07T06:41:06
| 2020-02-03T11:17:13
|
Python
|
UTF-8
|
Python
| false
| false
| 362
|
py
|
# Simple TCP client: connects to a local echo server, relays console
# input, prints the server's reply, and stops after ten round trips.
import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("localhost", 8081))

count = 0
while True:
    count += 1
    data = input("请输入数据")
    client.send(data.encode("utf-8"))
    info = client.recv(1024)
    print("服务器说:" + info.decode("utf-8"))
    if count > 10:
        client.close()
        # Bug fix: the original kept looping after close(), so the next
        # send() raised OSError on a closed socket; exit the loop instead.
        break
|
[
"1162591945@qq.com"
] |
1162591945@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.