Dataset schema (29 columns, one record per source file; the records below list these fields in order, separated by | delimiters):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2-616)
content_id: string (length 40)
detected_licenses: list (0-69 entries)
license_type: string (2 classes)
repo_name: string (length 5-118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4-63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k-686M, nullable)
star_events_count: int64 (0-209k)
fork_events_count: int64 (0-110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool
is_generated: bool
length_bytes: int64 (2-10.3M)
extension: string (246 classes)
content: string (length 2-10.3M)
authors: list (1 entry)
author_id: string (length 0-212)
7a6e6afaeb5f90e76f9c888023e777d3092727f6
|
713197a9519d72610804e1389e57d7c738a3d90e
|
/prjectoWebApp/views.py
|
a08c175f4967eb2c7f31f908e20a818a97745eae
|
[] |
no_license
|
lucianocanales/DjangoProject
|
08cb8bbb8f630f48b447913f8a72ad7e5383db68
|
8491b0c1d1b8d4fe45429e978b67b08abd9600bd
|
refs/heads/master
| 2023-02-27T12:10:23.470759
| 2021-02-13T00:09:58
| 2021-02-13T00:09:58
| 335,425,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from django.shortcuts import render
# Create your views here.
def home(request):
return render(
request,
'prjectoWebApp/home.html'
)
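For reference, a minimal URL configuration that could route to this view; the module path and route name below are illustrative assumptions, not part of the original repository:

# prjectoWebApp/urls.py (hypothetical sketch)
from django.urls import path
from . import views

urlpatterns = [
    path('', views.home, name='home'),  # map the site root to the home view above
]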
|
[
"lucianocanales@gmail.com"
] |
lucianocanales@gmail.com
|
d336fd082133272584e6ada46d41b4e97fdb0734
|
992e2ca6f7ef2a87111da24dfa1fc073fd975782
|
/lib/json_datetime.py
|
642919b85095bcf4a25d22c0b0727c3423846150
|
[
"MIT"
] |
permissive
|
daniel-kranowski/boto3-01-report-eb-autoscaling-alarms
|
203e01ec205f201293374c35c8e020cc32480286
|
72ca5d73986a2e45bdcc13d4dce171ddcb52d85a
|
refs/heads/master
| 2020-07-05T10:13:30.980310
| 2019-08-15T22:42:50
| 2019-08-15T22:42:50
| 202,620,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
# Obtained this code from https://gist.github.com/abhinav-upadhyay/5300137
import json
from datetime import datetime
from json import JSONDecoder
from json import JSONEncoder
class DateTimeDecoder(json.JSONDecoder):
def __init__(self, *args, **kargs):
JSONDecoder.__init__(self, object_hook=self.dict_to_object,
*args, **kargs)
    def dict_to_object(self, d):
        if '__type__' not in d:
            return d
        type_name = d.pop('__type__')  # avoid shadowing the built-in type
        try:
            dateobj = datetime(**d)
            return dateobj
        except (TypeError, ValueError):
            # not a serialized datetime after all; restore the marker
            d['__type__'] = type_name
            return d
class DateTimeEncoder(JSONEncoder):
""" Instead of letting the default encoder convert datetime to string,
convert datetime objects into a dict, which can be decoded by the
DateTimeDecoder
"""
def default(self, obj):
if isinstance(obj, datetime):
return {
'__type__' : 'datetime',
'year' : obj.year,
'month' : obj.month,
'day' : obj.day,
'hour' : obj.hour,
'minute' : obj.minute,
'second' : obj.second,
'microsecond' : obj.microsecond,
}
else:
return JSONEncoder.default(self, obj)
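A quick round-trip sketch using the two classes above; the sample payload is illustrative, and only the standard json cls hooks are used:

# Hypothetical usage: a datetime survives a dumps/loads round trip.
payload = {'created': datetime(2021, 2, 13, 0, 9, 58)}
encoded = json.dumps(payload, cls=DateTimeEncoder)
decoded = json.loads(encoded, cls=DateTimeDecoder)
assert decoded['created'] == payload['created']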
|
[
"github-noreply-202D09610@bizalgo.com"
] |
github-noreply-202D09610@bizalgo.com
|
b233ce40007c4997abdcc19a19b7f787186f0880
|
2bc20bc0b4eef8541b66704a9a46e709a438d9fd
|
/ngramsNVI/create_NVI.py
|
e295ff48586929b0f8f2a0c84374bb693ab6c7f9
|
[
"MIT"
] |
permissive
|
warwickpsych/ngramsNVI
|
ed1a41d1a921faa507e7ce9dc7048ad4df2d6295
|
4f07fc22eef973625180c6f9b4827b1324947614
|
refs/heads/master
| 2020-12-03T18:16:20.953496
| 2020-01-02T17:25:18
| 2020-01-02T17:25:18
| 231,426,273
| 0
| 0
|
MIT
| 2020-01-02T17:13:18
| 2020-01-02T17:13:17
| null |
UTF-8
|
Python
| false
| false
| 8,049
|
py
|
import argparse
import logging
import os
import string
import pandas as pd
from ngramsNVI.constants import PACKAGE_LOCATION
from ngramsNVI.utils import rescale, download_nrgams_file
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def load_valence_data(language):
""" Load valence data from the affective word norms (ANEW).
See Hills, T.T., Proto, E., Sgroi, D. et al. Historical analysis of national subjective wellbeing using millions of
digitized books. Nat Hum Behav 3, 1271–1275 (2019) doi:10.1038/s41562-019-0750-z for more information on how this
valence data has been gathered.
For german, we correct the values so that it is in the same range as the other languages.
Parameters
----------
language: str
Load valence data for one of the following languages 'ita', 'eng-gb', 'eng-us', 'spa', 'fre', 'ger'
Returns
-------
valence_data: Pandas.DataFrame
Dataframe with index of words and their associated valence score
"""
if language in ["eng-gb", "eng-us"]:
language = "eng"
valence_data = pd.read_csv("{}/data/ANEW/{}_valence.csv".format(PACKAGE_LOCATION, language), na_filter=False)
if language == "ger":
valence_data.rename(columns={'valence': 'valence_old'}, inplace=True)
valence_data["valence"] = rescale(valence_data["valence_old"].values, -3, 3, 1, 9)
return valence_data
def merge_ngrams_and_ANEW_data(valence_data, ngrams_fpath):
"""Add valence scores from ANEW to downloaded Google ngrams data
Parameters
----------
valence_data: Pandas.DataFrame
DataFrame including the columns: words (ANEW words), valence (ANEW scores for each word)
ngrams_fpath: str
        Path of the ngrams file to be processed
Returns
-------
ngrams_valence_scores: Pandas.DataFrame
        DataFrame for one ngrams letter with the following columns:
        ngram - ANEW word found in ngrams
        year - year of data
        match_count - number of times the word was found
        volume_count - number of volumes the word was found in
        valence - score from ANEW
"""
ngrams_data = pd.read_table(ngrams_fpath, compression='gzip',
names=["ngram", "year", "match_count", "volume_count"])
ngrams_data["ngram"] = ngrams_data["ngram"].str.lower()
ANEW_words = [k for k in valence_data.word]
ngrams_ANEW_words_data = ngrams_data[ngrams_data.ngram.isin(ANEW_words)]
if len(ngrams_ANEW_words_data) > 0:
ngrams_ANEW_words_by_year = ngrams_ANEW_words_data.groupby(['ngram', 'year']).sum()
ngrams_valence_scores = pd.merge(ngrams_ANEW_words_by_year.reset_index(), valence_data, how='left',
left_on=['ngram'], right_on=['word'])
ngrams_valence_scores = ngrams_valence_scores.drop(['word'], axis=1)
return ngrams_valence_scores
def process_nrgams_data(temp_directory, language, valence_data, delete_files):
"""Process nrgrams data for all letters for a chosen language
Parameters
----------
temp_directory: str
Temp directory location
language: str
Which of the following languages to process 'ita', 'eng-gb', 'eng-us', 'spa', 'fre', 'ger'
valence_data: Pandas.DataFrame
DataFrame including the columns: words (ANEW words), valence (ANEW scores for each word)
delete_files: bool
Whether to delete the file downloaded from ngrams to save on disk space
Returns
-------
    ngrams_valence_scores_all_letters: Pandas.DataFrame
        DataFrame for all ngrams letters with the following columns:
        ngram - ANEW word found in ngrams
        year - year of data
        match_count - number of times the word was found
        volume_count - number of volumes the word was found in
        valence - score from ANEW
"""
letters = string.ascii_lowercase
ngrams_valence_scores_processed = []
for letter in letters:
logger.info("Downloading data for {} {}".format(language, letter))
ngrams_fpath = download_nrgams_file(temp_directory, language, letter)
ngrams_valence_scores = merge_ngrams_and_ANEW_data(valence_data, ngrams_fpath)
ngrams_valence_scores_processed.append(ngrams_valence_scores)
if delete_files:
os.remove(ngrams_fpath)
ngrams_valence_scores_all_letters = pd.concat(ngrams_valence_scores_processed)
return ngrams_valence_scores_all_letters
def create_NVI(language, valence_data, delete_files=False):
""" Create a National Valence Index using Google Ngrams data
(http://storage.googleapis.com/books/ngrams/books/datasetsv2.html) and the affective word norms (ANEW) for one of
the following languages: Italian (ita), EnglishGB (eng-gb), Engligh US (eng-us), Spanish (spa), French(fre), or
German(ger).
This function saves the associated files (valence scores with ngrams counts, NVI and missing words which were unable
to be processed) for a language in the "data" directory
Parameters
----------
language: str
Which of the following languages to process 'ita', 'eng-gb', 'eng-us', 'spa', 'fre', 'ger'
valence_data: Pandas.DataFrame
DataFrame including the columns: words (ANEW words), valence (ANEW scores for each word)
delete_files: bool
Whether to delete downloaded ngrams files after processing
Returns
-------
None
"""
# Set up temporary directory to store large files
temp_directory = "{}/googlebooksdata".format(PACKAGE_LOCATION)
os.makedirs(temp_directory, exist_ok=True)
ngrams_valence_data = process_nrgams_data(temp_directory, language, valence_data, delete_files)
logger.info("Calculating valence scores")
total_words_per_year = ngrams_valence_data.groupby('year').agg(
match_totals=("match_count", sum))
ngrams_valence_data = pd.merge(ngrams_valence_data, total_words_per_year, how='left', on=['year'])
ngrams_valence_data["val_score"] = ngrams_valence_data["valence"] * (
ngrams_valence_data["match_count"] / ngrams_valence_data["match_totals"])
# Saving valence scores for all words
ngrams_valence_data.to_csv("{}/data/{}_valence_ngram_words.csv".format(PACKAGE_LOCATION, language), index=False)
# Saving NVI for all words
NVI_data = ngrams_valence_data[["year", "match_count", "val_score"]].groupby(['year']).sum()
NVI_data.to_csv("{}/data/{}_NVI.csv".format(PACKAGE_LOCATION, language))
# Checking for any unprocessed words, as some words will not be found in google ngrams if they are compound words
unprocessed_words = list(set(valence_data['word']) - set(ngrams_valence_data['ngram']))
logger.info("These words could not be processed {}".format(unprocessed_words))
with open("{}/data/{}_unprocessed_words.txt".format(PACKAGE_LOCATION, language), 'a') as file_:
for word in unprocessed_words:
file_.write("{}\n".format(word))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Create a National Valence Index using Google Ngrams data '
                    '(http://storage.googleapis.com/books/ngrams/books/datasetsv2.html) and the affective word norms '
                    '(ANEW) for one of the following languages: Italian (ita), English GB (eng-gb), '
                    'English US (eng-us), Spanish (spa), French (fre), or German (ger).')
parser.add_argument('-l', '--language', choices=['ita', 'eng-gb', 'eng-us', 'spa', 'fre', 'ger'],
help='The language to process')
parser.add_argument("-d", "--delete_files", help="Whether to delete downloaded ngrams files after processing",
action='store_true')
args = parser.parse_args()
valence_data = load_valence_data(language=args.language)
create_NVI(language=args.language, valence_data=valence_data, delete_files=args.delete_files)
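A micro-example of the weighting create_NVI applies, with hypothetical numbers for two words in a single year:

# 'happy' (valence 8.0) matched 30 times, 'sad' (valence 2.0) matched 10 times.
valence = {'happy': 8.0, 'sad': 2.0}
match_count = {'happy': 30, 'sad': 10}
match_totals = sum(match_count.values())  # 40
nvi = sum(valence[w] * match_count[w] / match_totals for w in match_count)
print(nvi)  # 8.0 * 30/40 + 2.0 * 10/40 = 6.5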
|
[
"cseresinhe@turing.ac.uk"
] |
cseresinhe@turing.ac.uk
|
a4b2331bc60e49067ff8516c4b13766c7a4c9c5e
|
e60a342f322273d3db5f4ab66f0e1ffffe39de29
|
/parts/zodiac/pyramid/tests/test_config/pkgs/scannable/another.py
|
2022b704558f0f407eb359cc3d36dfdfe3a9041b
|
[] |
no_license
|
Xoting/GAExotZodiac
|
6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1
|
f60b2b77b47f6181752a98399f6724b1cb47ddaf
|
refs/heads/master
| 2021-01-15T21:45:20.494358
| 2014-01-13T15:29:22
| 2014-01-13T15:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 102
|
py
|
/home/alex/myenv/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/tests/test_config/pkgs/scannable/another.py
|
[
"alex.palacioslopez@gmail.com"
] |
alex.palacioslopez@gmail.com
|
0c9b03f3a2bc72ab73f04e6bb21e080511057ade
|
55c09f8aea71ccf2d8611f704d5ef4a6e86b900d
|
/client/drawing/constants.py
|
1e03409cca4dc849e05e152eac4257ca70db9188
|
[] |
no_license
|
victormorozov1/tanks-server
|
56412fb39b82c54284ef864262995b2905aaca0e
|
6f5e09969c5e5b83b57b1b2665ec7b408ebef347
|
refs/heads/master
| 2020-12-10T07:45:26.967586
| 2020-02-20T08:45:40
| 2020-02-20T08:45:40
| 233,537,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
import pygame
CELL_SZ = 50
pygame.init()
display_size = pygame.display.Info()
SZX, SZY = display_size.current_w, display_size.current_h
#SZX, SZY = 800, 800
SAVERS_NUM = 3
|
[
"vmn3w@yandex.ru"
] |
vmn3w@yandex.ru
|
9c7d677d074b5d250abc200c103cff8fb806b269
|
df94f543424f47f87bd6d546cca23d1c5a7b024c
|
/easy/easy922.py
|
f8409367478a0930ddc49d9bb4bc49ab8b62ce17
|
[] |
no_license
|
wangpeibao/leetcode-python
|
c13cb63304e91dcd55ffacee541d9197cafd01ff
|
392a272a799decdd77c2410a89787ea8e1aa76d3
|
refs/heads/master
| 2023-01-31T05:09:34.850459
| 2020-12-04T03:25:21
| 2020-12-04T03:25:21
| 257,457,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
'''
922. Sort Array By Parity II
Given an array A of non-negative integers, half of the integers in A are odd and half are even.
Sort the array so that whenever A[i] is odd, i is odd, and whenever A[i] is even, i is even.
You may return any array that satisfies this condition.
Example:
Input: [4,2,5,7]
Output: [4,5,2,7]
Explanation: [4,7,2,5], [2,5,4,7], [2,7,4,5] would also be accepted.
Constraints:
2 <= A.length <= 20000
A.length % 2 == 0
0 <= A[i] <= 1000
'''
from typing import List
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
        # two pointers
start = 0
length = len(A)
while start < length:
if (start % 2 == 0 and A[start] % 2 == 0) or (start % 2 == 1 and A[start] % 2 == 1):
start += 1
continue
            # scan ahead for the next misplaced element and swap it into place
end = start + 1
while end < length:
if (end % 2 == 0 and A[end] % 2 == 0) or (end % 2 == 1 and A[end] % 2 == 1):
end += 2
else:
A[start], A[end] = A[end], A[start]
start = start + 2
return A
so = Solution()
print(so.sortArrayByParityII([4,2,5,7]) == [4,5,2,7])
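For comparison, an equivalent single-pass two-pointer formulation (a sketch, not part of the original file; the function name is mine): keep one pointer on even slots and one on odd slots, swapping whenever an even slot holds an odd value.

def sort_array_by_parity_ii(A):
    j = 1  # scans odd indices
    for i in range(0, len(A), 2):  # scans even indices
        if A[i] % 2 == 1:  # odd value stuck in an even slot
            # the half-even/half-odd guarantee means a misplaced even value exists at some odd slot
            while A[j] % 2 == 1:
                j += 2
            A[i], A[j] = A[j], A[i]
    return A

print(sort_array_by_parity_ii([4, 2, 5, 7]))  # [4, 5, 2, 7]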
|
[
"wangpeibao@troila.com"
] |
wangpeibao@troila.com
|
8715867deb70edd12ab5ba34f25f98faff89d712
|
ef0f1ed165ef71f3334c10b3770face188e8d894
|
/python/week6/survey_form/apps/surveyform/urls.py
|
b9a7ac204cb294a0e627c57b465392e956dea86c
|
[] |
no_license
|
alfredgarcia/codingdojo
|
55e6a9ded3e5eb01409c94d696569cae8a358660
|
c267d0d330831ba9c3a551d20ffc60392a0fac38
|
refs/heads/master
| 2020-09-13T22:33:01.581800
| 2017-01-04T22:04:14
| 2017-01-04T22:04:14
| 67,725,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
from django.conf.urls import url
from . import views
# from django.contrib import admin
urlpatterns = [
url(r'^$', views.index),
url(r'^process$', views.FormProcess),
url(r'^showresults$', views.ShowResults),
]
|
[
"alfredg2007@yahoo.com"
] |
alfredg2007@yahoo.com
|
6a100d0efde491779e1959f83bce860efd32e8da
|
dc22ea62344512e3157ba00d67e9f227f0a79288
|
/analysis/match_2017_stp26.py
|
cbadf8f2d701ca9ae7891b32b75ff1a08d9d9a09
|
[
"MIT"
] |
permissive
|
opensafely/post-covid-kidney-outcomes
|
75e7ce252cdb4d2bce3ab98610fec257ea0ab5b5
|
23bbb65b2c849e460ddb15eab0fcd4abb03bd972
|
refs/heads/main
| 2023-08-31T07:49:42.396915
| 2023-08-30T18:40:24
| 2023-08-30T18:40:24
| 444,884,435
| 0
| 0
|
MIT
| 2022-06-16T12:04:03
| 2022-01-05T17:03:12
|
Python
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
import pandas as pd
from osmatching import match
match(
case_csv="input_covid_matching_2017_stp26",
match_csv="input_2017_matching_stp26",
matches_per_case=3,
match_variables={
"male": "category",
"age": 0,
},
index_date_variable="covid_date",
replace_match_index_date_with_case="3_years_earlier",
date_exclusion_variables={
"death_date": "before",
"date_deregistered": "before",
"krt_outcome_date": "before",
},
output_suffix="_2017_stp26",
output_path="output",
)
|
[
"viyaasan@hotmail.com"
] |
viyaasan@hotmail.com
|
c7a2a13c8096b8fda05eaeda2c8d6143b7036182
|
c706cb904e172d773bdc0c3f233d0c0e560cd0fa
|
/transfer_uvs.py
|
0c085b03a027ee3ed8e122fe5ee28ab46c70ebe2
|
[] |
no_license
|
utkarsh6191/daily_scripts
|
86a0de610394a7bb90004613ce8a999d7b48576c
|
ea27858e79a7225a3c868d437babc4cc5fefe322
|
refs/heads/master
| 2023-08-31T16:05:49.977676
| 2021-09-15T15:28:14
| 2021-09-15T15:28:14
| 369,470,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import pymel.core as pm
"""
for geo in myGeos:
target_shape_orig = pm.listRelatives(geo, s=1)[1]
pm.transferAttributes('st1_prStoryBook01_mod_mod_base_v008a:' + geo, target_shape_orig, transferPositions=0,
transferNormals=0, transferUVs=2, transferColors=0, sampleSpace=5, targetUvSpace="map1",
searchMethod=3, flipUVs=0, colorBorders=1)
pm.delete(target_shape_orig, ch=1)
"""
sel = pm.ls(sl=1)
for s in sel:
    # get joint skinned to the mesh
    bind_joint = pm.listHistory(s, type="joint")
    print(bind_joint)
    # get skin cluster
    source_skinClstr = pm.listHistory(s, type="skinCluster")[0]
    print(source_skinClstr)
    # get deformer sets
    s_shape = pm.listRelatives(s, s=1)[-1]
    print(s_shape)
    # deformer_set = pm.listSets(type =2, object = s_shape)[-1]
    # print(deformer_set)
    # reference or duplicate mesh to copy weights onto (scene-specific namespace)
    s_ref = "st1_prCaravansTajTej01_mod_mod_base_v005:" + s
    # pm.sets(deformer_set, add = s_ref)
    # bind duplicate mesh to the joints
    destination_skinClstr = pm.skinCluster(bind_joint, s_ref, tsb=True, bm=0, sm=0, nw=1)
    # copy skin weights
    pm.copySkinWeights(ss=source_skinClstr, ds=destination_skinClstr, noMirror=True)
|
[
"69736885+utkarsh6191@users.noreply.github.com"
] |
69736885+utkarsh6191@users.noreply.github.com
|
109682c64aad2d044c9cc951d3a773c4106965bc
|
6d39e9031c9ab28c094edc042559dc649308528e
|
/backend/manage.py
|
d84a5dce9a344fbcab5dc3e5650245daac85af08
|
[] |
no_license
|
crowdbotics-apps/test-31900
|
ead3f9341645ce981aa07dad2756548ffb84c5d7
|
d5b54e57dfc2839f1a9f7237deb4df7c3899e30d
|
refs/heads/master
| 2023-08-29T16:35:59.385571
| 2021-11-10T00:12:56
| 2021-11-10T00:12:56
| 426,427,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_31900.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
255410b55472771e9052c3f41b6a567096950e38
|
132d2cd3baa0dee49507aba6c5f08637f6928935
|
/others/tempCodeRunnerFile.py
|
c31699c93ec116ec4eee41094136c18b44e1cfe2
|
[] |
no_license
|
sahilg50/Python_DSA
|
9b897599eed91ea5f0d52fd8cbb658caf0830f5a
|
b0b1cb17facf84280db31a6fdfd00f3c0c2282bf
|
refs/heads/main
| 2023-07-16T14:29:37.090711
| 2021-08-24T20:23:44
| 2021-08-24T20:23:44
| 378,184,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
hashmap = {}
print(hashmap.get('1'))  # dict.get returns None for a missing key instead of raising KeyError
|
[
"56653026+sahilg50@users.noreply.github.com"
] |
56653026+sahilg50@users.noreply.github.com
|
4521005155955e13b993dedfd85d80e48bed9b57
|
1960d9339041a7fb73d55d9c2e6933237a710d89
|
/tob-api/api_indy/management/commands/verify_credential_index.py
|
194bdea6d46bbabc0018a18508857d674f0329f1
|
[
"Apache-2.0"
] |
permissive
|
bcgov/TheOrgBook
|
e604a1e8054b1385c5510cbf5fe725e442bf194a
|
f328926b1d72dea8333a259485017e7bc808208a
|
refs/heads/master
| 2021-05-05T10:22:49.020183
| 2020-11-24T22:12:23
| 2020-11-24T22:12:23
| 104,127,743
| 84
| 88
|
Apache-2.0
| 2020-11-24T22:15:53
| 2017-09-19T20:56:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
import asyncio
import argparse
import json
import os
import sys
import time
import aiohttp
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, DEFAULT_DB_ALIAS
from django.db.models import signals
from api_v2.models.Address import Address
from api_v2.models.Attribute import Attribute
from api_v2.models.Credential import Credential
from api_v2.models.Name import Name
from tob_api.rocketchat_hooks import log_error, log_warning, log_info
from asgiref.sync import async_to_sync
API_BASE_URL = os.environ.get('API_BASE_URL', 'http://localhost:8080')
API_PATH = os.environ.get('API_PATH', '/api/v2')
SEARCH_API_PATH = os.environ.get('SEARCH_API_PATH', '/search/credential')
API_URL = "{}{}".format(API_BASE_URL, API_PATH)
class Command(BaseCommand):
    help = "Verify the indexes for all of the credentials."
def handle(self, *args, **options):
self.reprocess(*args, **options)
@async_to_sync
async def reprocess(self, *args, **options):
self.stdout.write("Starting ...")
cred_count = Credential.objects.count()
self.stdout.write("Verifying the indexes for {} credentials ...".format(cred_count))
async with aiohttp.ClientSession() as http_client:
current_cred = 0
for credential in Credential.objects.all().reverse().iterator():
current_cred += 1
self.stdout.write(
"\nVerifying index for credential id: {} ({} of {}) ...".format(
credential.id, current_cred, cred_count
)
)
try:
# Query search API using the wallet_id; credential.wallet_id
response = await http_client.get(
'{}{}'.format(API_URL, SEARCH_API_PATH),
params={ 'format':'json', 'latest':'any', 'revoked':'any', 'inactive':'any','wallet_id': credential.wallet_id}
)
self.stdout.write(
"\t{}"
.format(response.url))
if response.status != 200:
raise RuntimeError(
'Credential index could not be processed: {}'.format(await response.text())
)
result_json = await response.json()
except Exception as exc:
raise Exception(
'Could not verify credential index. '
'Is the OrgBook running?') from exc
credentialCount = result_json["total"]
if credentialCount < 1:
msg = "Error - No index was found for credential id: {}, wallet_id: {}".format(credential.id, credential.wallet_id)
self.stdout.write(msg)
await log_error(msg)
elif credentialCount > 1:
msg = "Error - More than one index was found for credential id: {}, wallet_id: {}".format(credential.id, credential.wallet_id)
self.stdout.write(msg)
await log_error(msg)
else:
msg = "Index successfully verified for credential id: {}, wallet_id: {}".format(credential.id, credential.wallet_id)
self.stdout.write(msg)
await log_info(msg)
|
[
"wade.barnes@shaw.ca"
] |
wade.barnes@shaw.ca
|
b165157f87e70a1070f197296b0d780668b1fc40
|
e15cb8739449fa5fad211d9d28dd58ff74269a77
|
/感知机/.ipynb_checkpoints/感知机-checkpoint.py
|
328be683ce92468e94f1a813a31c7ecb553d85b1
|
[] |
no_license
|
HG1227/ML
|
4815c299aa56cf72b4dc5811f6672421c638416f
|
1b92276690c57cf45b3e26125b7459e1bd236d16
|
refs/heads/master
| 2020-08-26T12:31:07.037477
| 2020-05-30T07:05:17
| 2020-05-30T07:05:17
| 217,008,927
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,896
|
py
|
#!/usr/bin/python
# coding:utf-8
# @software: PyCharm
# @file: 感知机.py
# @time: 2019/12/4
import numpy as np
def makeLinearSeparableData(weights, numLines):
    '''
    Generate a linearly separable data set; numFeatures, the number of features, is derived from weights.
    :param weights: a list holding the normal vector of the line used to generate the random data.
    :param numLines: a positive integer, the number of data points to create.
    :return: the generated data set.
    '''
w = np.array(weights)
numFeatures = len(weights)
dataSet = np.zeros((numLines, numFeatures + 1))
for i in range(numLines):
x = np.random.rand(1, numFeatures) * 20 - 10
        # compute the inner product
innerProduct = np.sum(w * x)
if innerProduct <= 0:
            # numpy's append extends the one-dimensional array with the label
dataSet[i] = np.append(x, -1)
else:
dataSet[i] = np.append(x, 1)
return dataSet
data = makeLinearSeparableData([4, 3, 2], 100)
print(data)
# visualize the data set
def plotData(dataSet):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Linear separable data set')
ax.set_xlabel("X")
ax.set_ylabel("Y")
labels = np.array(dataSet[:, 2])
    # np.where finds the row indices of the positive examples
idx_1 = np.where(dataSet[:, 2] == 1)
p1 = ax.scatter(dataSet[idx_1, 0], dataSet[idx_1, 1], marker='o',
c='g', s=20, label=1)
idx_2 = np.where(dataSet[:, 2] == -1)
p2 = ax.scatter(dataSet[idx_2, 0], dataSet[idx_2, 1], marker='x',
color='r', s=20, label=2)
plt.legend(loc='upper right')
plt.show()
# plotData(data)
# train the perceptron and visualize the classifier and its normal vector
def train(dataSet, plot=False):
''' (array, boolean) -> list
Use dataSet to train a perceptron
dataSet has at least 2 lines.
'''
    # stochastic gradient descent algorithm
    numLines = dataSet.shape[0]
    numFeatures = dataSet.shape[1]
    w = np.zeros((1, numFeatures - 1))  # initialize weights
separated = False
i = 0
while not separated and i < numLines:
        if dataSet[i][-1] * np.sum(w * dataSet[i, 0:-1]) <= 0:  # misclassified
            w = w + dataSet[i][-1] * dataSet[i, 0:-1]  # update the weight vector
            separated = False  # not yet fully separated
            i = 0  # restart the scan over every data point
        else:
            i += 1  # correctly classified; check the next data point
if plot == True:
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Linear separable data set')
plt.xlabel('X')
plt.ylabel('Y')
labels = np.array(dataSet[:, 2])
idx_1 = np.where(dataSet[:, 2] == 1)
p1 = ax.scatter(dataSet[idx_1, 0], dataSet[idx_1, 1],
marker='o', color='g', label=1, s=20)
idx_2 = np.where(dataSet[:, 2] == -1)
p2 = ax.scatter(dataSet[idx_2, 0], dataSet[idx_2, 1],
marker='x', color='r', label=2, s=20)
        # scale the weight vector down so it remains visible in the scatter plot
x = w[0][0] / np.abs(w[0][0]) * 10
y = w[0][1] / np.abs(w[0][0]) * 10
# ann = ax.annotate(u"", xy=(x, y),
# xytext=(0, 0), size=20, arrowprops=dict(arrowstyle="-|>"))
        # y values of two points used to draw the separating line (the perceptron)
ys = (-12 * (-w[0][0]) / w[0][1], 12 * (-w[0][0]) / w[0][1])
ax.add_line(Line2D((-12, 12), ys, linewidth=1, color='blue'))
plt.legend(loc='upper right')
plt.show()
return w
data = makeLinearSeparableData([4, 3], 100)
w = train(data, True)
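The while loop in train is the classic perceptron learning rule: whenever a point is misclassified (its label times the activation is non-positive), the weights are updated by w = w + y * x and the scan restarts from the first point; by the perceptron convergence theorem this terminates on linearly separable data such as the sets produced by makeLinearSeparableData.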
|
[
"hongghu.@outlook.com"
] |
hongghu.@outlook.com
|
7c4c68f415ca42dd6a3d377d49232c08f9e0c006
|
d890428ae26410b7fa6bcc73e91cc5f1fde9f34a
|
/py/tump/test.py
|
bbfe3299a6f34dc6f98d84464976975770f91b8c
|
[] |
no_license
|
ansenfeng/note
|
6623bcd6a8c700ecb2ce2ae9c142eda31944fe15
|
e8b7cd6b837645b47b56969f9b5ccdc78a5d10f5
|
refs/heads/master
| 2020-07-06T06:04:14.584965
| 2020-02-06T01:46:22
| 2020-02-06T01:46:22
| 202,916,092
| 0
| 0
| null | 2019-08-17T18:20:47
| 2019-08-17T18:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 610
|
py
|
from tkinter import *
root = Tk()
root.title("网易云音乐")  # "NetEase Cloud Music"
root.geometry("800x600")
root.geometry("+250+100")
label = Label(root, text="请输入要下载的内容:", font=('华文行楷', 25))  # label text: "Enter what you want to download:"
label.grid(row=0, column=0)
entry = Entry(root, font=('微软雅黑', 25))  # font: Microsoft YaHei
entry.grid(row=0, column=1)
text = Listbox(root, font=('微软雅黑', 30), width=45, height=10)
text.grid(row=1, columnspan=2)
button = Button(root, text="开始下载", font=('微软雅黑', 30))  # button text: "Start download"
button.grid(row=2, column=0, sticky=W)
button1 = Button(root, text="退出", font=('微软雅黑', 30))  # button text: "Quit"
button1.grid(row=2, column=1, sticky=E)
root.mainloop()
|
[
"noreply@github.com"
] |
ansenfeng.noreply@github.com
|
61ee902f9aec9bdeff25f6e72569396187f62aff
|
01afa0be1c3acbf562fd87bd8fec8b4101c1e461
|
/Mining-Massive-Dataset/week5/advanced_quiz3.py
|
8c96a6d7d682c7d9d8f2ec6fe73c3b09bf879b97
|
[] |
no_license
|
listiani13/coursera
|
e4f1116cc619b62336c5bb4d2e714e7051ae775c
|
5c84cf7171a440261de639b53558e9767b1cd85e
|
refs/heads/master
| 2021-01-22T03:54:31.657656
| 2016-04-04T11:07:25
| 2016-04-04T11:07:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 424
|
py
|
from math import sqrt
def euclidean(x, y):
return sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
points = [(1, 6), (3, 7), (4, 3), (7, 7), (8, 2), (9, 5)]
chosen = [(0, 0), (10, 10)]
for _ in range(5):
pos, mx = -1, -1
for i, p in enumerate(points):
distance = min([euclidean(p, pc) for pc in chosen])
if distance > mx:
mx, pos = distance, i
    print('choose:', points[pos])
chosen.append(points[pos])
del points[pos]
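For reference, this loop is the farthest-first (greedy k-center) selection heuristic: starting from the fixed corners (0, 0) and (10, 10), each of the 5 rounds picks the point whose minimum Euclidean distance to the already-chosen set is largest.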
|
[
"wangliangpeking@gmail.com"
] |
wangliangpeking@gmail.com
|
25cff27e63ff8317998d9326921d3ac3a56f1d83
|
96d7fafccb9e35ab862e679fb22b0d790c2a081b
|
/main/urls.py
|
6cf0d1300fa5f12e4dca18d76131354887260a82
|
[] |
no_license
|
folakemie5/notepadapp
|
695750be890662044c52dabee26e392e92bfb9c3
|
d2b304536fe45b8267e2b636b2ef0a1ce8868b35
|
refs/heads/main
| 2023-07-16T01:25:38.479877
| 2021-08-19T09:41:48
| 2021-08-19T09:41:48
| 397,222,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.notepads, name='notelist'),
path('<str:title>/<int:day>/<int:month>/<int:year>/', views.note_detail, name='note_detail')
]
|
[
"folakemie5@gmail.com"
] |
folakemie5@gmail.com
|
54d80f823fdf4e8beeb5810d52171050d781524e
|
804fa48b8f403025f7f08f582f036863d1b37659
|
/pe7.py
|
acf3aa4408635a8bac0f94919a8ce982b9f13ce7
|
[] |
no_license
|
pushpithaDilhan/ProjectEulerPython
|
78b15115a2bce059014c67c561ca17b124d598dd
|
bc14519f37d1496b683fb2339d0534ff33c22b9d
|
refs/heads/master
| 2021-01-10T04:49:24.491855
| 2017-04-12T06:03:11
| 2017-04-12T06:03:11
| 52,206,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
import time
s=time.time()
def isprime(n):
for i in range(3,int(n**0.5)+1,2):
if n%i==0:
return False
else:return True
a=[2,3,5]
i=7
while len(a)!=10001:
if isprime(i):
a.append(i)
i=i+2
print(max(a))
e = time.time() - s
print("elapsed time is %f Seconds" % e)
|
[
"pushpitha.14@cse.mrt.ac.lk"
] |
pushpitha.14@cse.mrt.ac.lk
|
fe66e54e53e158e66daa612d10faf13321626be2
|
f21389f75d371814c0f36452cbccff11bdb371ea
|
/exam/migrations/0026_auto_20210224_0016.py
|
8fd322b9410a84051b1e6f44d94bc1269d360cc8
|
[
"MIT"
] |
permissive
|
sezinbhr/bulut_bil
|
99d88182ff072863cdf10f588bbdd46206633ea1
|
70e6d56162f7c80e1494d8d4d263cbfb5a984be7
|
refs/heads/main
| 2023-08-02T22:08:17.467098
| 2021-09-15T13:27:13
| 2021-09-15T13:27:13
| 370,010,430
| 0
| 0
|
MIT
| 2021-05-23T09:27:25
| 2021-05-23T09:27:24
| null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Generated by Django 2.2.7 on 2021-02-23 21:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exam', '0025_auto_20210223_2358'),
]
operations = [
migrations.AlterField(
model_name='question',
name='correct_answer',
field=models.IntegerField(default=1),
),
]
|
[
"05170000782@ogrenci.ege.edu.tr"
] |
05170000782@ogrenci.ege.edu.tr
|
28cb977f5c9241e22b09e7c8b0923e0e7cf2487d
|
9b3ca4821d0c275df3c298c270cf544c8604648c
|
/hello-world.py
|
7b2ccf80923a3bc34b194ea68a5dd164a6661d6b
|
[] |
no_license
|
Noooneee/test_
|
520012f41c45f6a2804922955c571e858a712225
|
f3a0856273f885158d047b0fff7a8a25fb70b199
|
refs/heads/master
| 2022-11-09T05:57:36.854139
| 2020-06-27T02:55:35
| 2020-06-27T02:55:35
| 275,285,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 45
|
py
|
for i in range(10):
print("Hello World!")
|
[
"umar.mohammad7@outlook.com"
] |
umar.mohammad7@outlook.com
|
423d7f64ef9d3cef2e16119e39cb5f3152ae17b6
|
4fea4ac5b40e4dbaac2d32f5a155825bdb70d487
|
/tensorflow/rnn_long_sequence.py
|
4a903631941de6b4e7c3e2392452fa6548cfc9fa
|
[] |
no_license
|
JungAnJoon/mini_1
|
387c60586d1ebb4f99f1cc600c6b7817a29846c6
|
002c34bf87c11735cbbfccd8e47d813c8a83345b
|
refs/heads/master
| 2020-06-19T06:13:27.391102
| 2019-07-12T15:38:06
| 2019-07-12T15:38:06
| 196,593,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
# Lab 12 Character Sequence RNN
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # reproducibility
sample = " Hi, My name is Hongbeom Choi"
idx2char = list(set(sample)) # index -> char
char2idx = {c: i for i, c in enumerate(idx2char)}  # char -> index
# hyper parameters
dic_size = len(char2idx) # RNN input size (one hot size)
hidden_size = len(char2idx) # RNN output size
num_classes = len(char2idx) # final output size (RNN or softmax, etc.)
batch_size = 1 # one sample data, one batch
sequence_length = len(sample) - 1 # number of lstm rollings (unit #)
learning_rate = 0.1
sample_idx = [char2idx[c] for c in sample] # char to index
x_data = [sample_idx[:-1]] # X data sample (0 ~ n-1) hello: hell
y_data = [sample_idx[1:]] # Y label sample (1 ~ n) hello: ello
X = tf.placeholder(tf.int32, [None, sequence_length]) # X data
Y = tf.placeholder(tf.int32, [None, sequence_length]) # Y label
x_one_hot = tf.one_hot(X, num_classes) # one hot: 1 -> 0 1 0 0 0 0 0 0 0 0
cell = tf.contrib.rnn.BasicLSTMCell(
num_units=hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, _states = tf.nn.dynamic_rnn(
cell, x_one_hot, initial_state=initial_state, dtype=tf.float32)
# FC layer
X_for_fc = tf.reshape(outputs, [-1, hidden_size])
outputs = tf.contrib.layers.fully_connected(X_for_fc, num_classes, activation_fn=None)
# reshape out for sequence_loss
outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])
weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(
logits=outputs, targets=Y, weights=weights)
loss = tf.reduce_mean(sequence_loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
prediction = tf.argmax(outputs, axis=2)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(50):
l, _ = sess.run([loss, train], feed_dict={X: x_data, Y: y_data})
result = sess.run(prediction, feed_dict={X: x_data})
# print char using dic
result_str = [idx2char[c] for c in np.squeeze(result)]
print(i, "loss:", l, "Prediction:", ''.join(result_str))
|
[
"jaj1012@naver.com"
] |
jaj1012@naver.com
|
5629682ee3989a1d19e17ceb990d8d570b088d8a
|
69c7d1bdd81298a7131e4606b21f518a3d1de475
|
/notes/inheritance.py
|
98d3affec8ade7ba3185997a86edd4559eee0052
|
[] |
no_license
|
mendoncakr/testing
|
2990ce648c95ad97163204147a2414f6e22cd2ab
|
9b0ca08ca3c121fbcf2edf0474a77882affdc6b6
|
refs/heads/master
| 2021-01-22T18:51:33.867916
| 2015-07-09T03:45:14
| 2015-07-09T03:45:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
class Animal:
def __init__(self, sex, age, height, species):
self.sex = sex
self.age = age
self.height = height
self.species = species
def speak(self):
return "Hello, I am a(n) {}".format(self.species)
class Dog(Animal):
def __init__(self, sex, age, height, species, breed):
super().__init__(sex, age, height, species)
self.breed = breed
def speak(self):
return "WOOF"
class Cat(Animal):
def __init__(self, sex, age, height, species, breed, color):
super().__init__(sex, age, height, species)
self.breed = breed
self.color = color
def speak(self):
return "MEOW BETCH"
def my_color(self):
return "I'm {}".format(self.color)
animal = Animal('Female', 21, '20cm', 'animal')
print(animal.speak())
dog = Dog('Male', 18, '1cm','C. lupus', 'Husky')
print(dog.speak())
cat = Cat('Female', 1000, '0.5cm','F. catus', 'Siamese', 'Grey')
print(cat.speak())
print(cat.my_color())
|
[
"mendonca.kr@gmail.com"
] |
mendonca.kr@gmail.com
|
e24c46fd98029aac1b66598cac02ba5537c313d5
|
d89acc9364060c7628ef53409abdfd192c3b8837
|
/离散傅里叶变换/good.py
|
2217808d46a976025ca1ce0b2d9a486ce0a43294
|
[] |
no_license
|
yl763593864/Fourier-Transform
|
ac6aba866f0dc1e3fae787ecfac68d68c8d0063c
|
285f5ad5595262cef5cb39bee8e46cba04495176
|
refs/heads/master
| 2020-06-13T23:21:42.453485
| 2019-09-03T03:28:37
| 2019-09-03T03:28:37
| 194,820,850
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26
|
py
|
print("xxs")
#hello
#first
|
[
"yangsongtang@gmail.com"
] |
yangsongtang@gmail.com
|
c7c9c7b8c9cdf0529dfe969bd1b620285d0716ae
|
e00226c64b0926acfd11a1ef5ec2a49e24ecdfdf
|
/ETI06F1.py
|
06170ea829d3e6cdee3563448a848f45dc0d9f88
|
[] |
no_license
|
zetor6623/SPOJ_PL
|
a6b0bb4304828790565d8f141d5b6316421b8b78
|
9eff1e24ef25f7b1995e4dad2b07fd8d80a56abc
|
refs/heads/master
| 2020-04-09T09:58:57.103241
| 2018-12-03T21:04:58
| 2018-12-03T21:04:58
| 160,253,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
#!/usr/bin/python
import math
wejscie = [float(x) for x in input().split()]
r = wejscie[0]
d = wejscie[1]
# area of a disc of radius r minus a disc of diameter d: pi * (r^2 - (d/2)^2)
pole = ((r * r) - ((d * d) / 4)) * math.pi
pole = round(pole, 2)
print(pole)
|
[
"noreply@github.com"
] |
zetor6623.noreply@github.com
|
f32b08a5dadf9bf4dbc0b238e4cb160e93b689f5
|
3a01d6f6e9f7db7428ae5dc286d6bc267c4ca13e
|
/pylith/meshio/OutputMatElastic.py
|
75bd619e57bb719fa4f7cc5e470df1ff774171da
|
[
"MIT"
] |
permissive
|
youngsolar/pylith
|
1ee9f03c2b01560706b44b4ccae99c3fb6b9fdf4
|
62c07b91fa7581641c7b2a0f658bde288fa003de
|
refs/heads/master
| 2020-12-26T04:04:21.884785
| 2014-10-06T21:42:42
| 2014-10-06T21:42:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,792
|
py
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2014 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pyre/meshio/OutputMatElastic.py
##
## @brief Python object for managing output of finite-element
## information for material state variables.
##
## Factory: output_manager
from OutputManager import OutputManager
# OutputMatElastic class
class OutputMatElastic(OutputManager):
"""
Python object for managing output of finite-element information for
material state variables.
Factory: output_manager
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(OutputManager.Inventory):
"""
Python object for managing OutputMatElastic facilities and properties.
"""
## @class Inventory
## Python object for managing OutputMatElastic facilities and properties.
##
## \b Properties
## @li \b cell_info_fields Names of cell info fields to output.
## @li \b cell_data_fields Names of cell data fields to output.
##
## \b Facilities
## @li None
import pyre.inventory
cellInfoFields = pyre.inventory.list("cell_info_fields",
default=["mu",
"lambda",
"density"])
cellInfoFields.meta['tip'] = "Names of cell info fields to output."
cellDataFields = pyre.inventory.list("cell_data_fields",
default=["total_strain", "stress"])
cellDataFields.meta['tip'] = "Names of cell data fields to output."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="outputmatelastic"):
"""
Constructor.
"""
OutputManager.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based using inventory.
"""
OutputManager._configure(self)
self.vertexInfoFields = []
self.vertexDataFields = []
self.cellInfoFields = self.inventory.cellInfoFields
self.cellDataFields = self.inventory.cellDataFields
return
# FACTORIES ////////////////////////////////////////////////////////////
def output_manager():
"""
Factory associated with OutputManager.
"""
return OutputMatElastic()
# End of file
|
[
"baagaard@usgs.gov"
] |
baagaard@usgs.gov
|
dd07566eb79e4e8773393ba441cf85898be88c05
|
37174351804fca73485d586a15cba70bd7e46cdc
|
/als-app/als_model.py
|
d978eee0e11743e5c8305dedafb953203cfdd632
|
[] |
no_license
|
sbartek/intro-to-pyspark
|
382d41169c6a59d26e4da7307f41877d009124ef
|
c8820024ec043b338455644ca8d559e210a7f966
|
refs/heads/master
| 2020-05-16T23:44:12.099782
| 2019-05-15T20:41:46
| 2019-05-15T20:41:46
| 183,376,909
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
import pyspark.sql.functions as F
from pyspark.ml.recommendation import ALS, ALSModel
class ALSRecModel:
def __init__(self, userCol, itemCol, rank=5, maxIter=2, spark=None):
self.userCol = userCol
self.itemCol = itemCol
self.rank = rank
self.maxIter = maxIter
self.als = ALS(
rank=self.rank, maxIter=self.maxIter,
userCol=self.userCol, itemCol=self.itemCol,
seed=666, implicitPrefs=True)
self.model = None
self.spark = spark
def fit(self, user_item_sdf):
self.model = self.als.fit(user_item_sdf)
def transform(self):
return self.model.recommendForAllUsers(10)
def save(self, file_name):
self.model.write().overwrite().save(file_name)
def load(self, file_name):
self.model = ALSModel.load(file_name)
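A hypothetical usage sketch; the column names and interaction data are illustrative, and since the wrapped ALS sets implicitPrefs=True, the default "rating" column is read as implicit-feedback strength:

from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
interactions = spark.createDataFrame(
    [(0, 10, 1.0), (0, 11, 1.0), (1, 10, 1.0)],
    ["user", "item", "rating"])
rec = ALSRecModel(userCol="user", itemCol="item", spark=spark)
rec.fit(interactions)
top10 = rec.transform()  # DataFrame of top-10 item recommendations per user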
|
[
"bartekskorulski@gmail.com"
] |
bartekskorulski@gmail.com
|
a736d5a5660159fb0615d48680b0d70ffdac597c
|
a2080cbcf9694ad03690769cfc64d85a57f1d9d5
|
/src/graphql/language/printer.py
|
842f251878846b17bd2c7f9e94bba434648fd747
|
[
"MIT"
] |
permissive
|
wuyuanyi135/graphql-core
|
84196a47aec0f9508db3f8aadb8951b9fc9b9fe0
|
169ae7bced0f515603e97f1def925f3d062e5009
|
refs/heads/main
| 2023-04-13T11:38:10.815573
| 2021-05-02T05:17:29
| 2021-05-02T05:21:58
| 363,327,364
| 1
| 0
|
MIT
| 2021-05-01T05:05:29
| 2021-05-01T05:05:28
| null |
UTF-8
|
Python
| false
| false
| 13,157
|
py
|
from functools import wraps
from json import dumps
from typing import Any, Callable, Collection, Optional
from ..language.ast import Node, OperationType
from .visitor import visit, Visitor
from .block_string import print_block_string
__all__ = ["print_ast"]
Strings = Collection[str]
class PrintedNode:
"""A union type for all nodes that have been processed by the printer."""
alias: str
arguments: Strings
block: bool
default_value: str
definitions: Strings
description: str
directives: str
fields: Strings
interfaces: Strings
locations: Strings
name: str
operation: OperationType
operation_types: Strings
repeatable: bool
selection_set: str
selections: Strings
type: str
type_condition: str
types: Strings
value: str
values: Strings
variable: str
variable_definitions: Strings
def print_ast(ast: Node) -> str:
"""Convert an AST into a string.
The conversion is done using a set of reasonable formatting rules.
"""
return visit(ast, PrintAstVisitor())
def add_description(method: Callable[..., str]) -> Callable:
"""Decorator adding the description to the output of a static visitor method."""
@wraps(method)
def wrapped(node: PrintedNode, *args: Any) -> str:
return join((node.description, method(node, *args)), "\n")
return wrapped
class PrintAstVisitor(Visitor):
@staticmethod
def leave_name(node: PrintedNode, *_args: Any) -> str:
return node.value
@staticmethod
def leave_variable(node: PrintedNode, *_args: Any) -> str:
return f"${node.name}"
# Document
@staticmethod
def leave_document(node: PrintedNode, *_args: Any) -> str:
return join(node.definitions, "\n\n") + "\n"
@staticmethod
def leave_operation_definition(node: PrintedNode, *_args: Any) -> str:
name, op, selection_set = node.name, node.operation, node.selection_set
var_defs = wrap("(", join(node.variable_definitions, ", "), ")")
directives = join(node.directives, " ")
# Anonymous queries with no directives or variable definitions can use the
# query short form.
return (
join((op.value, join((name, var_defs)), directives, selection_set), " ")
if (name or directives or var_defs or op != OperationType.QUERY)
else selection_set
)
@staticmethod
def leave_variable_definition(node: PrintedNode, *_args: Any) -> str:
return (
f"{node.variable}: {node.type}"
f"{wrap(' = ', node.default_value)}"
f"{wrap(' ', join(node.directives, ' '))}"
)
@staticmethod
def leave_selection_set(node: PrintedNode, *_args: Any) -> str:
return block(node.selections)
@staticmethod
def leave_field(node: PrintedNode, *_args: Any) -> str:
return join(
(
wrap("", node.alias, ": ")
+ node.name
+ wrap("(", join(node.arguments, ", "), ")"),
join(node.directives, " "),
node.selection_set,
),
" ",
)
@staticmethod
def leave_argument(node: PrintedNode, *_args: Any) -> str:
return f"{node.name}: {node.value}"
# Fragments
@staticmethod
def leave_fragment_spread(node: PrintedNode, *_args: Any) -> str:
return f"...{node.name}{wrap(' ', join(node.directives, ' '))}"
@staticmethod
def leave_inline_fragment(node: PrintedNode, *_args: Any) -> str:
return join(
(
"...",
wrap("on ", node.type_condition),
join(node.directives, " "),
node.selection_set,
),
" ",
)
@staticmethod
def leave_fragment_definition(node: PrintedNode, *_args: Any) -> str:
# Note: fragment variable definitions are experimental and may be changed or
# removed in the future.
return (
f"fragment {node.name}"
f"{wrap('(', join(node.variable_definitions, ', '), ')')}"
f" on {node.type_condition}"
f" {wrap('', join(node.directives, ' '), ' ')}"
f"{node.selection_set}"
)
# Value
@staticmethod
def leave_int_value(node: PrintedNode, *_args: Any) -> str:
return node.value
@staticmethod
def leave_float_value(node: PrintedNode, *_args: Any) -> str:
return node.value
@staticmethod
def leave_string_value(node: PrintedNode, key: str, *_args: Any) -> str:
if node.block:
return print_block_string(node.value, "" if key == "description" else " ")
return dumps(node.value)
@staticmethod
def leave_boolean_value(node: PrintedNode, *_args: Any) -> str:
return "true" if node.value else "false"
@staticmethod
def leave_null_value(_node: PrintedNode, *_args: Any) -> str:
return "null"
@staticmethod
def leave_enum_value(node: PrintedNode, *_args: Any) -> str:
return node.value
@staticmethod
def leave_list_value(node: PrintedNode, *_args: Any) -> str:
return f"[{join(node.values, ', ')}]"
@staticmethod
def leave_object_value(node: PrintedNode, *_args: Any) -> str:
return f"{{{join(node.fields, ', ')}}}"
@staticmethod
def leave_object_field(node: PrintedNode, *_args: Any) -> str:
return f"{node.name}: {node.value}"
# Directive
@staticmethod
def leave_directive(node: PrintedNode, *_args: Any) -> str:
return f"@{node.name}{wrap('(', join(node.arguments, ', '), ')')}"
# Type
@staticmethod
def leave_named_type(node: PrintedNode, *_args: Any) -> str:
return node.name
@staticmethod
def leave_list_type(node: PrintedNode, *_args: Any) -> str:
return f"[{node.type}]"
@staticmethod
def leave_non_null_type(node: PrintedNode, *_args: Any) -> str:
return f"{node.type}!"
# Type System Definitions
@staticmethod
@add_description
def leave_schema_definition(node: PrintedNode, *_args: Any) -> str:
return join(
("schema", join(node.directives, " "), block(node.operation_types)), " "
)
@staticmethod
def leave_operation_type_definition(node: PrintedNode, *_args: Any) -> str:
return f"{node.operation.value}: {node.type}"
@staticmethod
@add_description
def leave_scalar_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(("scalar", node.name, join(node.directives, " ")), " ")
@staticmethod
@add_description
def leave_object_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(
(
"type",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
),
" ",
)
@staticmethod
@add_description
def leave_field_definition(node: PrintedNode, *_args: Any) -> str:
args = node.arguments
args = (
wrap("(\n", indent(join(args, "\n")), "\n)")
if has_multiline_items(args)
else wrap("(", join(args, ", "), ")")
)
directives = wrap(" ", join(node.directives, " "))
return f"{node.name}{args}: {node.type}{directives}"
@staticmethod
@add_description
def leave_input_value_definition(node: PrintedNode, *_args: Any) -> str:
return join(
(
f"{node.name}: {node.type}",
wrap("= ", node.default_value),
join(node.directives, " "),
),
" ",
)
@staticmethod
@add_description
def leave_interface_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(
(
"interface",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
),
" ",
)
@staticmethod
@add_description
def leave_union_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(
(
"union",
node.name,
join(node.directives, " "),
"= " + join(node.types, " | ") if node.types else "",
),
" ",
)
@staticmethod
@add_description
def leave_enum_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(
("enum", node.name, join(node.directives, " "), block(node.values)), " "
)
@staticmethod
@add_description
def leave_enum_value_definition(node: PrintedNode, *_args: Any) -> str:
return join((node.name, join(node.directives, " ")), " ")
@staticmethod
@add_description
def leave_input_object_type_definition(node: PrintedNode, *_args: Any) -> str:
return join(
("input", node.name, join(node.directives, " "), block(node.fields)), " "
)
@staticmethod
@add_description
def leave_directive_definition(node: PrintedNode, *_args: Any) -> str:
args = node.arguments
args = (
wrap("(\n", indent(join(args, "\n")), "\n)")
if has_multiline_items(args)
else wrap("(", join(args, ", "), ")")
)
repeatable = " repeatable" if node.repeatable else ""
locations = join(node.locations, " | ")
return f"directive @{node.name}{args}{repeatable} on {locations}"
@staticmethod
def leave_schema_extension(node: PrintedNode, *_args: Any) -> str:
return join(
("extend schema", join(node.directives, " "), block(node.operation_types)),
" ",
)
@staticmethod
def leave_scalar_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(("extend scalar", node.name, join(node.directives, " ")), " ")
@staticmethod
def leave_object_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(
(
"extend type",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
),
" ",
)
@staticmethod
def leave_interface_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(
(
"extend interface",
node.name,
wrap("implements ", join(node.interfaces, " & ")),
join(node.directives, " "),
block(node.fields),
),
" ",
)
@staticmethod
def leave_union_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(
(
"extend union",
node.name,
join(node.directives, " "),
"= " + join(node.types, " | ") if node.types else "",
),
" ",
)
@staticmethod
def leave_enum_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(
("extend enum", node.name, join(node.directives, " "), block(node.values)),
" ",
)
@staticmethod
def leave_input_object_type_extension(node: PrintedNode, *_args: Any) -> str:
return join(
("extend input", node.name, join(node.directives, " "), block(node.fields)),
" ",
)
def join(strings: Optional[Strings], separator: str = "") -> str:
"""Join strings in a given collection.
Return an empty string if it is None or empty, otherwise join all items together
separated by separator if provided.
"""
return separator.join(s for s in strings if s) if strings else ""
def block(strings: Optional[Strings]) -> str:
"""Return strings inside a block.
Given a collection of strings, return a string with each item on its own line,
wrapped in an indented "{ }" block.
"""
return wrap("{\n", indent(join(strings, "\n")), "\n}")
def wrap(start: str, string: Optional[str], end: str = "") -> str:
"""Wrap string inside other strings at start and end.
If the string is not None or empty, then wrap with start and end, otherwise return
an empty string.
"""
return f"{start}{string}{end}" if string else ""
def indent(string: str) -> str:
"""Indent string with two spaces.
If the string is not None or empty, add two spaces at the beginning of every line
inside the string.
"""
return wrap(" ", string.replace("\n", "\n "))
def is_multiline(string: str) -> bool:
"""Check whether a string consists of multiple lines."""
return "\n" in string
def has_multiline_items(strings: Optional[Strings]) -> bool:
"""Check whether one of the items in the list has multiple lines."""
return any(is_multiline(item) for item in strings) if strings else False
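A round-trip sketch, assuming the installed graphql-core package exposes its usual top-level parse helper:

from graphql import parse  # assumed public entry point of graphql-core

doc = parse("query HeroQuery { hero { name } }")
print(print_ast(doc))
# Expected output:
# query HeroQuery {
#   hero {
#     name
#   }
# }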
|
[
"cito@online.de"
] |
cito@online.de
|
4b900f40ae107f6eed5132e247ac9bf751311707
|
9e4910f1af6ae6e0f338adb41cfa036a9bf37894
|
/ctblog/ctblog/settings.py
|
05cd74256e45965e4305ce9f703bca0abc71649a
|
[] |
no_license
|
codetubes/python_django
|
afb65524ec0be05aaa7ecf291bb0beaad71f290e
|
5e16694fc54ecff019707dea96459d873380692b
|
refs/heads/master
| 2022-04-20T12:06:32.915015
| 2020-04-11T16:57:05
| 2020-04-11T16:57:05
| 254,913,770
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,168
|
py
|
"""
Django settings for ctblog project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_ec7l7zxqm8pbsx0b9^nm_)3j!*n)z@zlr7kssfiy!n-%wsa25'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ctblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ctblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
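Since MEDIA_URL and MEDIA_ROOT are configured here, serving uploads during development also needs the standard static() helper in the project URLconf; a sketch of that addition (the project's own routes are elided):

# ctblog/urls.py (sketch)
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    # ... project routes ...
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)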
|
[
"arman@armans-mbp.home"
] |
arman@armans-mbp.home
|
9e3e88e5e20b8620f29caa40607f2ce722da5e40
|
98f110f98055cc9d5bc1bb52807dddb98f9c3b32
|
/experiments/bytecode_update/compile_bytecode/extract_entry_points.py
|
45ed59fa43c73598055e7d39c12e0a3d62de1da9
|
[] |
no_license
|
sgadrat/super-tilt-bro-server
|
4f50336d4729dff08261a04a22d89392062da22d
|
1fa884e66c1ac819ecf47ba7f9cdec7e677507fe
|
refs/heads/master
| 2023-08-18T05:09:35.481574
| 2023-08-10T21:16:39
| 2023-08-10T21:16:39
| 166,117,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
#!/usr/bin/env python
import listing
import sys
import json
entry_points = []
# opcodes hex in string as they are represented in xa listing
JSR = '20'
BPL = '10'
BMI = '30'
BVC = '50'
BVS = '70'
BCC = '90'
BCS = 'b0'
BEQ = 'f0'
BNE = 'd0'
def on_line(_, line):
global entry_points
if line['address'] >= 0xc000:
if line['code'][0] != ' ':
# label
entry_points.append({'pc': line['address'], 'name': '{}'.format(line['code'].rstrip())})
elif line['data_repr'][:2] in [JSR]:
# JSR, good chance callee will return to next line
entry_points.append({'pc': line['address'] + 3})
elif line['data_repr'][:2] in [BPL, BMI, BVC, BVS, BCC, BCS, BEQ, BNE]:
# branching, "no" branch is the next instruction
entry_points.append({'pc': line['address'] + 2})
listing.parse_file(sys.argv[1], on_listing=on_line)
print(json.dumps(entry_points))
|
[
"sgadrat@wontfix.it"
] |
sgadrat@wontfix.it
|
2deb284c25b04ed8d2b413c44ceaf72090afcbc4
|
7af0bf24774db7703a60f8ab6300e293e9939ebd
|
/microbench/benchtest.py
|
b259b0b88499686a36a8a17d2e159641313f6c95
|
[] |
no_license
|
ctalbert/microbench
|
ce8e9fe2498f4db56af26de216836775b04fc5e3
|
e71cd1770e6eb626350a2d4fc9273f7dfc582bb5
|
refs/heads/master
| 2016-09-05T21:08:08.999820
| 2013-03-19T08:32:27
| 2013-03-19T08:32:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import os
import sys
import json
import time
import re
import weakref
from marionette import CommonTestCase
from marionette import Marionette
class MicroBenchTestCase(CommonTestCase):
match_re = re.compile(r"test_(.*)\.html$")
def __init__(self, marionette_weakref, methodName='run_test', htmlfile=None):
self.htmlfile = htmlfile
self._marionette_weakref = marionette_weakref
self.marionette = None
CommonTestCase.__init__(self, methodName)
@classmethod
def add_tests_to_suite(cls, mod_name, filepath, suite, testloader, marionette, testvars):
suite.addTest(cls(weakref.ref(marionette), htmlfile=filepath))
def run_test(self):
if self.marionette.session is None:
self.marionette.start_session()
self.marionette.test_name = os.path.basename(self.htmlfile)
# TODO: This is kind of a hack - depends on how we set up the httpd server
# Would be better to have marionette test runner pass in url
        # TODO: For some reason mozhttpd isn't loading this URL; not sure why
#self.url = self.marionette.baseurl + '/tests/' + os.path.basename(self.htmlfile)
self.url = 'http://localhost/%s' % os.path.basename(self.htmlfile)
print "DBG::URL is: %s" % self.url
self.marionette.execute_script("log('TEST-START: %s');" % self.htmlfile.replace('\\', '\\\\'))
self.marionette.set_context("chrome")
self.marionette.navigate(self.url)
# TODO: Set the timeouts by reading from the script file boilerplate: http://mxr.mozilla.org/mozilla-central/source/testing/marionette/client/marionette/marionette_test.py#186
self.marionette.set_script_timeout(10000)
# TODO: Should capture timeouts in try/except
results = self.marionette.execute_script('window.document.start_test();',
new_sandbox=False,
special_powers=True)
self.marionette.execute_script("log('TEST-END: %s');" % self.htmlfile.replace('\\', '\\\\'))
self.marionette.test_name = None
|
[
"ctalbert@mozilla.com"
] |
ctalbert@mozilla.com
|
cd18c91b29b30269577ecada14d674eb31496e86
|
1e95b2fe6d888604bb2529a3b054ffcadc311fce
|
/arrange_pichus.py
|
cacaff22b7551c8a86d5a477f16941a34d8c987c
|
[] |
no_license
|
bhargavsai77777/Route-Pichu
|
5a853a1b59fef0b66e590f7119300cf59b806546
|
5daa7c1c586dea4fd15f41ad717f8743cc62d07c
|
refs/heads/main
| 2023-05-26T06:52:11.048883
| 2021-06-12T03:07:53
| 2021-06-12T03:07:53
| 376,190,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,895
|
py
|
#!/usr/local/bin/python3
#
# arrange_pichus.py : arrange agents on a grid, avoiding conflicts
#
# Submitted by : [PUT YOUR NAME AND USERNAME HERE]
#
# Based on skeleton code in CSCI B551, Spring 2021
#
import sys
# Parse the map from a given filename
def parse_map(filename):
with open(filename, "r") as f:
return [[char for char in line] for line in f.read().rstrip("\n").split("\n")]
# Count total # of pichus on board
def count_pichus(board):
return sum([row.count('p') for row in board])
# Return a string with the board rendered in a human-pichuly format
def printable_board(board):
return "\n".join(["".join(row) for row in board])
'''This function validates the index that the successors function gives to add_pichu.
Further, it checks whether we can add a pichu at that particular position, based on the conditions designed below.
It gives add_pichu the green light if all of the conditions below hold (i.e. the new pichu cannot see any other pichu).'''
def validate_position(board, R, C):
check_dict = {'j': [0, 0, 0, 0]}
for w in range(0, C):
if (board[R][w] == 'p'): check_dict['j'][0] += 1
if (board[R][w] == 'X' or board[R][w] == '@'): check_dict['j'][0] = 0
for x in range(len(board[0]) - 1, C, -1):
if (board[R][x] == 'p'): check_dict['j'][1] += 1
if (board[R][x] == 'X' or board[R][x] == '@'): check_dict['j'][1] = 0
for y in range(0, R):
if (board[y][C] == 'p'): check_dict['j'][2] += 1
if (board[y][C] == 'X' or board[y][C] == '@'): check_dict['j'][2] = 0
for z in range(len(board) - 1, R, -1):
if (board[z][C] == 'p'): check_dict['j'][3] += 1
if (board[z][C] == 'X' or board[z][C] == '@'): check_dict['j'][3] = 0
if sum(check_dict['j']) > 0: return False
return True
# Add a pichu to the board at the given position, and return a new board (doesn't change original)
# In this function we call validate_position to check whether a pichu can be added at the given index.
def add_pichu(board, row, col):
if validate_position(board,row,col)==True:return board[0:row] + [board[row][0:col] + ['p',] + board[row][col+1:]] + board[row+1:]
else: return board
# Get list of successors of given board state
def successors(board):
return [add_pichu(board, r, c) for r in range(0, len(board)) for c in range(0, len(board[0])) if board[r][c] == '.']
# check if board is a goal state
def is_goal(board, k):
return count_pichus(board) == k
# Arrange agents on the map
#
# This function MUST take two parameters as input -- the house map and the value k --
# and return a tuple of the form (new_map, success), where:
# - new_map is a new version of the map with k agents,
# - success is True if a solution was found, and False otherwise.
#
'''In the solve function below, I made a small modification by adding a visiting_node_list, which checks whether a successor has already been visited.
Basically we are using DFS here, which we know can sometimes loop forever; to avoid that, I do not revisit nodes that have already been visited.'''
def solve(initial_board, k):
fringe = [initial_board]
visiting_node_list = []
while len(fringe) > 0:
for s in successors( fringe.pop() ):
if s not in visiting_node_list:
if is_goal(s, k):
return(s,True)
visiting_node_list.append(s)
fringe.append(s)
return ([],False)
# Main Function
if __name__ == "__main__":
house_map = parse_map(sys.argv[1])
# This is K, the number of agents
k = int(sys.argv[2])
#k = 9
print("Starting from initial board:\n" + printable_board(house_map) + "\n\nLooking for solution...\n")
(newboard, success) = solve(house_map, k)
print("Here's what we found:")
print(printable_board(newboard) if success else "None")
|
[
"noreply@github.com"
] |
bhargavsai77777.noreply@github.com
|
e4e4cb04e8a4bd53f5c3ce46f4504a2164bb6517
|
be8dd95b894e0c0dae77fcfe174564c5c02d4c4b
|
/techpitgram/urls.py
|
2a7cec3e6775e2d1653b862fb5b22b7c25103e83
|
[] |
no_license
|
iKora128/techpitgram
|
3433c57d30838f48bf2eec619915edd12112e6af
|
c9a55eaa036f6dbc75a3216508a1a75f85765c17
|
refs/heads/master
| 2020-12-14T22:52:06.441596
| 2020-01-19T12:39:45
| 2020-01-19T12:39:45
| 234,898,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
"""techpitgram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"nagashimadaichi@nagashimadaichinoMacBook-Pro.local"
] |
nagashimadaichi@nagashimadaichinoMacBook-Pro.local
|
ea592e1891a71f9b0b5333fa7e4f0802e01c234d
|
ad908dddab6bd9a87731881c071f8cefc2f25c0a
|
/app/recipe/views.py
|
6a362b235a2deb8b975ad554d945d452eeb85ecd
|
[
"MIT"
] |
permissive
|
krismwas/recipe-app
|
5eec41bdd4c56ec93e28cf27553e8b9eb2017d8d
|
19972e6cdafa00db96306e54b9bad715cb459463
|
refs/heads/master
| 2020-04-30T16:29:56.505771
| 2019-04-02T07:47:48
| 2019-04-02T07:47:48
| 176,949,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,070
|
py
|
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
# def get_queryset(self):
# """Retrieve the recipes for the authenticated user"""
# return self.queryset.filter(user=self.request.user)
def _params_to_ints(self, qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tag_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tag_ids)
if ingredients:
ingredient_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredient_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == 'retrieve':
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path='upload-image')
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
# class TagViewSet(viewsets.GenericViewSet,
# mixins.ListModelMixin,
# mixins.CreateModelMixin):
# """Manage tags in the database"""
# """
# Please note this code has been improved
# by the code below with the class name of
# BaseRecipeAttrViewSet
# """
# authentication_classes = (TokenAuthentication,)
# permission_classes = (IsAuthenticated,)
# queryset = Tag.objects.all()
# serializer_class = serializers.TagSerializer
#
# def get_queryset(self):
# """Return objects for the current authenticated user only"""
# return self.queryset.filter(user=self.request.user).order_by('-name')
#
# def perform_create(self, serializer):
# """Create a new ingredient"""
# serializer.save(user=self.request.user)
#
#
# class IngredientViewSet(viewsets.GenericViewSet,
# mixins.ListModelMixin,
# mixins.CreateModelMixin):
# """Manage ingredients in the database"""
# authentication_classes = (TokenAuthentication,)
# permission_classes = (IsAuthenticated,)
# queryset = Ingredient.objects.all()
# serializer_class = serializers.IngredientSerializer
#
# def get_queryset(self):
# """Return objects for the current authenticated user only"""
# return self.queryset.filter(user=self.request.user).order_by('-name')
#
# def perform_create(self, serializer):
# """Create a new ingredient"""
# serializer.save(user=self.request.user)
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base viewset for user owned recipe attributes"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
return self.queryset.filter(user=self.request.user).order_by('-name')
def perform_create(self, serializer):
"""Create a new ingredient"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
|
[
"chrischrismwangi@gmail.com"
] |
chrischrismwangi@gmail.com
|
4234148e60b4e6ef9a4ee683926ae88a7bb1d316
|
465c407dc2196cf37009639b3e5ad2ee046c1e45
|
/app/api/mod_item/grey/deploy.py
|
47eacf4408d2ba54baac41439e5644b7bfa228f1
|
[
"BSD-3-Clause"
] |
permissive
|
TouchPal/guldan
|
9ef6f746d8625c821ace22da5b298d6ec7a8643a
|
74cc0bf687109d16c3eb94010b4cc25bd5c5bcc0
|
refs/heads/master
| 2023-03-08T19:55:26.286517
| 2018-01-08T06:56:07
| 2018-01-08T06:56:07
| 115,521,519
| 44
| 4
|
NOASSERTION
| 2022-01-21T18:56:19
| 2017-12-27T13:01:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
# -*- coding: utf-8 -*-
from flask import request
from app.api.utils.request_util import parse_request
from app.api.utils import response
from app.api.mod_item import item_blueprint
from app.api.mod_item.utils import ensure_item
from app.api.mod_item.validate import validate_for_item_modify
from app.api.mod_item.modify import item_modify
from .utils import ensure_grey_item, invalidate_cache_for_grey
def parse_grey_item_full_deploy_arguments(request):
op_info = parse_request(request)
return op_info
def item_full_deploy(resource_id=None, item=None, grey_item=None):
invalidate_cache_for_grey(item.id, item.name, grey_item.item_visibility)
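    # copy the grey item's data and visibility onto the live item, leaving grey mode (in_grey=False)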
item_modify(
item_id=resource_id, item_data=grey_item.item_data, item_type=grey_item.item_type,
item_visibility=grey_item.item_visibility, in_grey=False
)
@item_blueprint.route("/<int:item_id>/upgrade", methods=["POST"])
@response.dict_response_deco
def full_deploy_item(item_id):
item = ensure_item(item_id)
grey_item = ensure_grey_item(item_id)
op_info = parse_grey_item_full_deploy_arguments(request)
validate_for_item_modify(
op_info.user_hash,
item_id
)
item_full_deploy(resource_id=item_id, item=item, grey_item=grey_item)
return {
"msg": "OK"
}
|
[
"alex.zheng@cootek.cn"
] |
alex.zheng@cootek.cn
|
cbc684ff9ae4dd85231ece8eaed2a8851b6264ba
|
deaf60a5ba012e68f8509c0df0d35a5228419b71
|
/找商网/zhao_shang_wang_changxin/zhao_shang_wang/spiders/spider_data.py
|
a34737cdfa962283d95ea12c2c4ffaafadfb4f46
|
[] |
no_license
|
kokohui/con_spider
|
7162d8e58725d9334db5f1da34649cd1d1ef29ea
|
da1181b53e5cbca546d1bb749f9efc2f48e698f8
|
refs/heads/master
| 2022-03-03T19:37:33.721533
| 2019-08-22T10:05:32
| 2019-08-22T10:05:32
| 193,631,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,365
|
py
|
# -*- coding: utf-8 -*-
from ..items import ZhaoShangWangItem
import scrapy
from scrapy import Request
from bs4 import BeautifulSoup
import os
import random
import requests
import pymysql
import time
import re
import jieba.analyse
conn = pymysql.connect(host='192.168.1.210', user='root', passwd='zhangxing888', db='ktcx_buschance', port=3306,
charset='utf8')
cur = conn.cursor()  # get a cursor
class SpiderDataSpider(scrapy.Spider):
name = 'spider_data'
# start_urls = ['https://www.zhaosw.com/product/search/1541291/2']
def start_requests(self):
sql_id = "SELECT url FROM bus_spider_data WHERE source = '找商网' and TYPE = 'gongying' AND is_del = '0' AND isuse = '0' ORDER BY create_date LIMIT 1 "
cur.execute(sql_id)
res_all_list = cur.fetchall()
url = res_all_list[0][0]
for num in range(1, 2):
url_2 = 'https://www.zhaosw.com/product/search/{}/{}'.format(url, num)
print(url_2)
yield Request(url=url_2, callback=self.parse)
def parse(self, response):
detail_url = response.xpath('//*[@id="productForm"]/div[@class="m-product-list"]/a/@href')[0].extract()
yield Request(url=detail_url, callback=self.parse_2)
def parse_2(self, response):
res_url = response.xpath('/html/body/header/div/div[4]/div/div/div/ul/li[2]/a/@href')[0].extract()
yield Request(url=res_url, callback=self.parse_3)
def parse_3(self, response):
pro_url_list = response.xpath('//*[@id="productForm"]/div[3]/div/a/@href').extract()
for pro_url in pro_url_list:
yield Request(url=pro_url, callback=self.parse_detail)
def parse_detail(self, response):
item = ZhaoShangWangItem()
mobile = ''
result_count = 0
try:
mobile = response.xpath('//p[@class="p3"]/span[@class="span2"]/text()')[0].extract().strip()
com_name = str(response.xpath('//p[@class="p-title"]/a/text()').extract()[0]).strip()
sql_count = "select count(0) from bus_user where company_name='{}'".format(com_name)
cur.execute(sql_count)
result = cur.fetchall()
result_count = int(result[0][0])
except:
print('没有手机号或公司重复')
if mobile != '' and result_count == 0:
print('................................................')
            # fetch the category ids from the database
sql_id = "SELECT one_level,two_level,three_level,keyword FROM bus_spider_data WHERE source = '找商网' and TYPE = 'gongying' AND is_del = '0' AND isuse = '0' ORDER BY create_date LIMIT 1 "
cur.execute(sql_id)
print('sql_id?????????????', sql_id)
res_all_list = cur.fetchall()
for res_all in res_all_list:
one_level = res_all[0]
item['one_level_id'] = str(one_level)
print('id.........', item['one_level_id'])
two_level = res_all[1]
item['two_level_id'] = str(two_level)
print('id.........', item['two_level_id'])
three_level = res_all[2]
item['three_level_id'] = str(three_level)
print('id.........', item['three_level_id'])
keywords = res_all[-1]
item['keywords'] = str(keywords)
            # save the product images
os_img_2_list = []
try:
str_ran = str(random.randint(0, 999999))
os.makedirs('/home/imgServer/hc/{}'.format(str_ran))
                # save the linked images to disk
res_img = response.xpath('//*[@id="productImage"]/div[2]/ul/li/a/img/@src')
for img_url in res_img:
img_url = img_url.extract()
img_url = 'https:' + img_url.strip()
img_url = re.sub('\.\.\d+x\d+.jpg', '', img_url)
print('img_url>>>>>>>>>>>>><<<<<<<<<<<<<<<<<::::::', img_url)
code_img = requests.get(url=img_url).content
img_name = str(random.randint(1, 999999))
with open('/home/imgServer/hc/{}/{}.jpg'.format(str_ran, img_name), 'wb') as f:
f.write(code_img)
os_img_2 = 'http://img.youkeduo.com.cn/hc/' + '{}/{}.jpg'.format(str_ran, img_name)
os_img_2_list.append(os_img_2)
os_img_2_str_1 = os_img_2_list[0]
os_img_2_str = ','.join(os_img_2_list)
item['list_img'] = os_img_2_str_1
item['imgs'] = os_img_2_str
print('图片ok', os_img_2_list)
except:
print('图片错误.')
            # creation time
create_date = time.strftime('%Y.%m.%d %H:%M:%S ', time.localtime(time.time()))
item['create_date'] = create_date
            # price
price = ''
try:
price = str(response.xpath('/html/body/main/div[4]/div[1]/div[2]/div[2]/div[1]/div/span/text()').extract()[0].strip())
if price.startswith('¥'):
price = price[1:]
if not price:
price = '面议'
print('price', price)
except:
print('price', price)
item['price'] = price
            # title
title = ''
try:
title = str(response.xpath('/html/body/main/div[4]/div[1]/div[2]/div[1]/h4/text()').extract()[0])
print('title', title)
except:
print('title', title)
item['title'] = title
# way
if price != '':
way = '0'
else:
way = '1'
item['way'] = way
res_detail_html = response.text
try:
soup = BeautifulSoup(res_detail_html, 'lxml')
html_1 = str(soup.find('div', class_="parameter-body"))
html = str(soup.find('div', class_="introduction-body clearfix"))
# print(html)
strinfo = re.compile('<img.*?>')
html_2 = strinfo.sub('', html)
strinfo = re.compile('<br.*?>')
html_3 = strinfo.sub('', html_2)
strinfo = re.compile('慧聪网')
html_4 = strinfo.sub('优客多', html_3)
                # append the downloaded images to the html
div_list = ['<div id="img_detail">', '</div>']
for os_img_2_url in os_img_2_list:
os_img_2_url = '<img alt="{}" src="{}">'.format(title, os_img_2_url)
div_list.insert(1, os_img_2_url)
div_str = '\n'.join(div_list)
html_all = html_1 + html_4 + '\n' + div_str
# print(html_all)
except Exception as e:
raise e
item['detail'] = str(html_all)
# units
units = ''
try:
units = response.xpath('/html/body/main/div[4]/div[1]/div[2]/div[2]/div[1]/div/text()').extract()[-1]
units = units.strip().replace('/', '').replace('\n', '')
print('units', units)
except:
print('units', units)
item['units'] = units
# com_name
com_name = ''
try:
com_name = str(response.xpath('//p[@class="p-title"]/a/text()').extract()[0]).strip()
print('com_name', com_name)
except:
print('com_name', com_name)
item['com_name'] = com_name
# linkman
linkman = ''
try:
linkman = re.findall('<span.*?>联系人:</span><span.*?>(.*?)</span>', response.text)[0]
print('linkman', linkman)
except:
print('linkman', linkman)
item['linkman'] = linkman
# mobile
mobile = ''
try:
mobile = response.xpath('//p[@class="p3"]/span[@class="span2"]/text()')[0].extract().strip()
print('mobile', mobile)
except:
print('mobile', mobile)
item['mobile'] = mobile
# address
address = ''
try:
address = re.findall('<span.*?>所在地区:</span><span.*?>(.*?)</span>', response.text)[0]
print('address', address)
except:
print('address', address)
item['address'] = address
scopes = '-'
try:
scopes = response.xpath('//div[@class="p7-content"]/span[2]/a/text()').extract()
scopes = str(scopes).strip('[').strip(']').replace("'", "").replace(",", " ")
print('scopes', scopes)
except:
print('scopes', scopes)
item['scopes'] = scopes
summary = ''
try:
summary = response.xpath('//div[@class="p-contain"]/p[@class="p4"]/span[2]/text()')[0].extract()
print('summary>>>>>>>>>>>>>>>', summary)
except:
print('summary', summary)
item['summary'] = summary
yield item
|
[
"2686162923@qq.com"
] |
2686162923@qq.com
|
f99a9206e3aad2da48d0857fb2f4ee34a77b55d7
|
df94467e24ce38f566e5d96f179de4ad365e1f1d
|
/appmain/migrations/0109_auto_20200328_0724.py
|
34e670648898ffa848f3053bd26e0e5437cc1f25
|
[] |
no_license
|
dhchamber/PigSkinners
|
498f931d112692c0cef3767cd2a6ca8b1014bcb1
|
9accd0dbd4f8ed93720b708117d42aab662569f2
|
refs/heads/master
| 2023-04-07T14:32:37.649907
| 2020-05-09T15:28:50
| 2020-05-09T15:28:50
| 231,812,836
| 1
| 0
| null | 2023-03-31T14:52:07
| 2020-01-04T18:53:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,403
|
py
|
# Generated by Django 3.0.1 on 2020-03-28 13:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('appmain', '0108_auto_20200325_0730'),
]
operations = [
migrations.CreateModel(
name='PickRevision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('revision', models.PositiveSmallIntegerField()),
('points', models.PositiveSmallIntegerField()),
('pick_score', models.PositiveSmallIntegerField(default=0)),
('saved', models.BooleanField(default=False)),
('entered_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pick_rev_entered', to=settings.AUTH_USER_MODEL)),
('koth_game', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pick_rev_koth_game', to='appmain.Game')),
('koth_team', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pick_rev_koth_team', to='appmain.Team')),
('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pick_rev_updated', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='pick_revs', to=settings.AUTH_USER_MODEL)),
('wk', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='pick_rev_wk', to='appmain.Week')),
],
options={
'verbose_name': 'pick revision',
'verbose_name_plural': 'pick revisions',
'ordering': ['user', 'wk'],
},
),
migrations.AddIndex(
model_name='pickrevision',
index=models.Index(fields=['revision', 'user', 'wk'], name='appmain_pic_revisio_2d0954_idx'),
),
migrations.AddConstraint(
model_name='pickrevision',
constraint=models.UniqueConstraint(fields=('revision', 'user', 'wk'), name='pickrev_user_wk'),
),
]
|
[
"dhchamber@msn.com"
] |
dhchamber@msn.com
|
b8349e59ee37806bbb211823d84042788796f8c0
|
8ca6ddae047850046e4eaf33d1820bd917c2d822
|
/src/app/util/mail.py
|
7fb672b41c2b901b60101023f854c2f81add3efa
|
[] |
no_license
|
AllenSix/homekit
|
0582017588481c773689355c8b2164835ca5955d
|
29ad509893aaecf4b518c0e3468db7e2eb43d1e5
|
refs/heads/master
| 2020-05-17T22:03:06.176706
| 2020-01-09T14:36:11
| 2020-01-09T14:36:11
| 183,989,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-6-20 上午10:27
# @Author : Skye
# @Site :
# @File : mail.py
# @Software: PyCharm
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from src.app.config import *
def send_email(title, content, receivers):
    message = MIMEText(content, 'plain', 'utf-8')  # content, format, encoding
message['From'] = "{}".format(MAIL_SENDER)
message['To'] = receivers
message['Subject'] = title
try:
        smtpObj = smtplib.SMTP_SSL(MAIL_HOST, 465)  # send over SSL, port is usually 465
        # smtpObj = smtplib.SMTP(MAIL_HOST, 587)  # alternative: plain SMTP + STARTTLS, port is usually 587
        # smtpObj.ehlo()
        # smtpObj.starttls()
        smtpObj.login(MAIL_USER, MAIL_PASS)  # log in
        smtpObj.sendmail(MAIL_SENDER, receivers, message.as_string())  # send
print("mail has been send successfully.")
except smtplib.SMTPException as e:
print(e)
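# Usage sketch (hypothetical values; MAIL_HOST, MAIL_USER, etc. come from src.app.config):
# send_email('Alert', 'The sensor went offline', 'someone@example.com')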
|
[
"csf71106410@163.com"
] |
csf71106410@163.com
|
5bd234d032a1cef724c7d19f94ecdca75497c3b5
|
803bab6f782099d995bcdb99d163486f4fff8c50
|
/test/test_pointnav_resnet_policy.py
|
f58a4a45e857196c0ab6b215a39c3fce54de9832
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-SA-3.0"
] |
permissive
|
facebookresearch/habitat-lab
|
7088506509f64da6d682f5dc69427589f71a58a9
|
f5b29e62df0788d70ba3618fc738fa4e947428ba
|
refs/heads/main
| 2023-08-24T14:00:02.707343
| 2023-08-23T04:53:48
| 2023-08-23T04:53:48
| 169,164,391
| 792
| 298
|
MIT
| 2023-09-14T15:20:03
| 2019-02-04T23:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 4,432
|
py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os.path
import shlex
import subprocess
import numpy as np
import pytest
import torch
from gym import spaces
from habitat import read_write
from habitat_baselines.config.default import get_config
from habitat_baselines.rl.ddppo.policy import PointNavResNetPolicy
ACTION_SPACE = spaces.Discrete(4)
OBSERVATION_SPACES = {
"depth_model": spaces.Dict(
{
"depth": spaces.Box(
low=0,
high=1,
shape=(256, 256, 1),
dtype=np.float32,
),
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
"rgb_model": spaces.Dict(
{
"rgb": spaces.Box(
low=0,
high=255,
shape=(256, 256, 3),
dtype=np.uint8,
),
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
"blind_model": spaces.Dict(
{
"pointgoal_with_gps_compass": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(2,),
dtype=np.float32,
),
}
),
}
MODELS_DEST_DIR = "data/ddppo-models"
MODELS_BASE_URL = "https://dl.fbaipublicfiles.com/habitat/data/baselines/v1/ddppo/ddppo-models"
MODELS_TO_TEST = {
"gibson-2plus-resnet50.pth": {
"backbone": "resnet50",
"observation_space": OBSERVATION_SPACES["depth_model"],
"action_space": ACTION_SPACE,
},
"gibson-2plus-mp3d-train-val-test-se-resneXt50-rgb.pth": {
"backbone": "se_resneXt50",
"observation_space": OBSERVATION_SPACES["rgb_model"],
"action_space": ACTION_SPACE,
},
"gibson-0plus-mp3d-train-val-test-blind.pth": {
"backbone": None,
"observation_space": OBSERVATION_SPACES["blind_model"],
"action_space": ACTION_SPACE,
},
}
def _get_model_url(model_name):
return f"{MODELS_BASE_URL}/{model_name}"
def _get_model_path(model_name):
return f"{MODELS_DEST_DIR}/{model_name}"
@pytest.fixture(scope="module", autouse=True)
def download_data():
for model_name in MODELS_TO_TEST:
model_url = _get_model_url(model_name)
model_path = _get_model_path(model_name)
if not os.path.exists(model_path):
print(f"Downloading {model_name}.")
download_command = (
"wget --continue " + model_url + " -P " + MODELS_DEST_DIR
)
subprocess.check_call(shlex.split(download_command))
assert os.path.exists(
model_path
), "Download failed, no package found."
@pytest.mark.parametrize(
"pretrained_weights_path,backbone,observation_space,action_space",
[
(
_get_model_path(model_name),
params["backbone"],
params["observation_space"],
params["action_space"],
)
for model_name, params in MODELS_TO_TEST.items()
],
)
def test_pretrained_models(
pretrained_weights_path, backbone, observation_space, action_space
):
config = get_config(
"test/config/habitat_baselines/ddppo_pointnav_test.yaml"
)
with read_write(config):
ddppo_config = config.habitat_baselines.rl.ddppo
ddppo_config.pretrained = True
ddppo_config.pretrained_weights = pretrained_weights_path
if backbone is not None:
ddppo_config.backbone = backbone
policy = PointNavResNetPolicy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
pretrained_state = torch.load(pretrained_weights_path, map_location="cpu")
prefix = "actor_critic."
policy.load_state_dict(
{ # type: ignore
k[len(prefix) :]: v
for k, v in pretrained_state["state_dict"].items()
}
)
|
[
"noreply@github.com"
] |
facebookresearch.noreply@github.com
|
68202dcd675d0bb11daa99543c2774c9b913f26e
|
df43c1539d5e46d88213f09c008a6c28d2c0cb49
|
/barbers/migrations/0010_auto_20201121_1348.py
|
4016964d6cda0064e22ab1b23b202746256997ed
|
[] |
no_license
|
doctor-evans/barbershub
|
345ae0f745f88ed146e4a997e62a730029e9a6a1
|
3fdfa45d5cfecbe76aeb854409ffcd71eae5df9d
|
refs/heads/master
| 2023-01-24T00:34:43.200586
| 2020-11-22T12:48:37
| 2020-11-22T12:48:37
| 315,028,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
# Generated by Django 3.1.3 on 2020-11-21 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('barbers', '0009_productitem_slug'),
]
operations = [
migrations.AlterField(
model_name='productitem',
name='slug',
field=models.SlugField(unique=True),
),
]
|
[
"evanschan200@gmail.com"
] |
evanschan200@gmail.com
|
9394166569d8caffdd6df13892890bf089cd7837
|
baaca529b304e18165d575fb727984e95a59db80
|
/2.X/examples/list_assignment.py
|
22657c9a0dc57c86845005c8ced261b1f7dec706
|
[] |
no_license
|
BartVandewoestyne/Python
|
fae9dfccdf5e7864103a2ec8f4b919abd2862d16
|
9add88babb2dac05139196297771b33e412d38e5
|
refs/heads/master
| 2021-11-17T22:35:37.202392
| 2021-10-22T19:21:09
| 2021-10-22T19:21:09
| 38,354,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
# Example on list assignment.
#
# References:
#
# [1] http://robertheaton.com/2014/02/09/pythons-pass-by-object-reference-as-explained-by-philip-k-dick/
# [2] http://stackoverflow.com/questions/12888506/assignment-operator-about-list-in-python
listA = [0]
listB = listA
listB.append(1)
print listA
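# prints [0, 1]: listB is just another name for the same list object, so the append is visible through listA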
list1 = ["Tom", "Sam", "Jim"]
list2 = list1
print id(list1)
print id(list2)
list3 = list1[:]
print id(list3)
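# list1[:] creates a shallow copy, so list3 has a different id than list1 and list2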
|
[
"Bart.Vandewoestyne@telenet.be"
] |
Bart.Vandewoestyne@telenet.be
|
62b12bf8bb74f8a2775b4c0482a8770435327642
|
441f5001e8f2136b8ae048a7eae27d4dd148ddd8
|
/src/sortepy/util.py
|
f6b5dfbc48e7a169b6b4447e1fa687da5e0cd79f
|
[] |
no_license
|
guilhermaker/sorte.py
|
97ac3aaea128c718d89ba4cd3c0fccab7eacefab
|
eeb3a1e850ebd07c70df3fa3499cc6290b2b7b0c
|
refs/heads/master
| 2020-04-13T16:54:06.590898
| 2017-12-07T03:06:43
| 2017-12-07T03:19:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,677
|
py
|
# encoding=utf8
import http.cookiejar
import errno
import os
import re
import sqlite3
import urllib.request
import time
def get_config_path(app='sortepy'):
"""Obtém o caminho de configuração de acordo com o SO
Por enquanto é suportado os sistemas POSIX e Windows (NT)
"""
# Linux, UNIX, BSD, ...
if os.name == 'posix':
prefixo = '.config/'
profile_dir = os.environ.get("HOME")
# Windows 2000, XP, Vista, 7, 8, ...
elif os.name == 'nt':
prefixo = ''
profile_dir = os.environ.get("APPDATA")
    # If no supported OS was detected, raise an exception
else:
raise NotImplementedError("Caminho de configuração não detectado")
return os.path.join(profile_dir, prefixo + app)
def makedirs(caminho):
"""Versão própria do makedirs()
Essa versão não lança exceção se o caminho já existir
"""
try:
os.makedirs(caminho)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Util:
def __init__(self, cfg_path=None):
        # If the path is an empty string, no cache at all should be used
        # Defined for testing purposes
if cfg_path == '':
self.in_cache = False
return
        # If no path was given, use the default configuration directory
if cfg_path is None:
try:
cfg_path = get_config_path()
            # It may happen that the directory for cfg_path cannot be determined
except NotImplementedError:
self.in_cache = False
return
        # Create the configuration directory if it does not exist
makedirs(cfg_path)
        # path of the cache file
self.cache_path = os.path.join(cfg_path, 'cache.db')
        # Set the configuration attributes
self.pages_db = self.get_mapdb('paginas')
self.in_cache = True
def get_mapdb(self, name):
return FileDB.open(self.cache_path, name)
def download(self, url, in_cache=None):
in_cache = in_cache if isinstance(in_cache, bool) else self.in_cache
        # Get the page from the cache
conteudo = None
if in_cache:
conteudo = self.cache(url)
        # Or download it
if conteudo is None:
            # The lottery results pages require cookies
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
            # Adding this cookie doubles the response time
opener.addheaders.append(("Cookie", "security=true"))
page = opener.open(url)
conteudo = page.read()
charset = page.headers.get_param('charset')
if charset is not None:
conteudo = conteudo.decode(charset)
else:
conteudo = conteudo.decode()
if in_cache:
self.cache(url, conteudo)
return conteudo
def cache(self, url, conteudo=None):
        # No content given: read from the cache
if conteudo is None:
if url not in self.pages_db:
return None
            # get the cache entry
result = self.pages_db[url]
            # if it is a dirty entry, check whether its time in that state has already expired
if self.is_dirty(result):
timestamp, _ = result.split('|', 1)
if time.time() > int(timestamp) + 1800:
del self.pages_db[url]
return None
else:
return result
        # Otherwise: write to the cache
else:
self.pages_db[url] = conteudo
def blame(self, url):
"""Marca o resultado de uma URL como inválida.
Isso é feito, registrando o horário em que esse método foi chamado.
"""
if self.in_cache and url in self.pages_db:
self.pages_db[url] = "%d|" % int(time.time())
DIRTY_RE = re.compile(r'^[0-9]+\|')
@classmethod
def is_dirty(cls, s):
return cls.DIRTY_RE.match(s)
class FileDB:
@staticmethod
def open(filename, prefix=''):
db = FileDB._SQLite3(filename, prefix)
return db
class _SQLite3(object):
        __version__ = 0  # for now this serves no purpose
def __init__(self, filename, prefix=''):
self._con = sqlite3.connect(filename)
self._table = prefix + 'map'
self._create_schema()
def close(self):
self._con.commit()
self._con.close()
def flush(self):
self._con.commit()
def __del__(self):
try:
self.close()
except sqlite3.Error:
pass
def _create_schema(self):
try:
self._con.execute("CREATE TABLE %s (key TEXT PRIMARY KEY, value TEXT)" % self._table)
self._write_dbversion(self.__version__)
            # in case the 'map' table already exists
except sqlite3.OperationalError:
pass
def _read_dbversion(self):
(dbversion,) = self._con.execute('PRAGMA user_version').fetchone()
return dbversion
def _write_dbversion(self, version):
self._con.execute('PRAGMA user_version = %d' % version)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __setitem__(self, key, value):
with self._con as con:
try:
con.execute("INSERT INTO %s VALUES (?, ?)" % self._table, (key, value))
except sqlite3.IntegrityError:
con.execute("UPDATE %s SET value=? WHERE key=?" % self._table, (value, key))
def __getitem__(self, key):
cursor = self._con.cursor()
cursor.execute("SELECT value FROM %s WHERE key=?" % self._table, (key,))
result = cursor.fetchone()
if result:
return result[0]
else:
raise KeyError(key)
def __delitem__(self, key):
with self._con as con:
con.execute("DELETE FROM %s WHERE key=?" % self._table, (key,))
def __contains__(self, key):
cursor = self._con.cursor()
cursor.execute("SELECT 1 FROM %s WHERE key=?" % self._table, (key,))
return cursor.fetchall() != []
def __enter__(self):
return self
def __exit__(self, *args):
self.__del__()
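# Usage sketch (hypothetical path; 'paginas' is the prefix Util actually uses):
# db = FileDB.open('/tmp/cache.db', prefix='paginas')
# db['url'] = 'conteudo'
# assert db.get('url') == 'conteudo'
# assert 'url' in db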
|
[
"wagnerluis1982@gmail.com"
] |
wagnerluis1982@gmail.com
|
258340149c613722be8d1cfed6c2f43bf4840d6b
|
69eb034a1d086d1b2ce1f1083df2b2fd74b9f5cc
|
/train_model_rmre.py
|
7396f5c35cbd7cd3cf08f2c365cdd892b4978ffc
|
[] |
no_license
|
webdxq/genarate_blessing
|
2c0f6afc55f4c507750911802a80fe299a4690d6
|
a08a09071edf687dcb512713daea1daf00450383
|
refs/heads/master
| 2020-03-29T01:38:33.419803
| 2018-09-19T06:10:26
| 2018-09-19T06:10:26
| 145,077,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,938
|
py
|
#!/usr/bin/python2
#-*- coding: UTF-8 -*-
import collections
import numpy as np
import tensorflow as tf
import os
import sys
import chardet
import re
import json
import time
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf8')
# os.environ['CUDA_VISIBLE_DEVICES']='1'
#------------------------------- data preprocessing ---------------------------#
# poetry_file ='../data/poetry.txt'
minlen = 4
maxlen = 15
blessing_file ='/home/pingan_ai/dxq/project/gen_blessing/dataset/data/line_lyrics.txt'
blessings = []
all_words = []
cantoneses = open('/home/pingan_ai/dxq/project/cantonese.txt','r').readline().split(' ')
# print(cantoneses)
cantonese = [re.compile(i.decode('utf-8')) for i in cantoneses]
LEARNING_RATE_BASE = 0.02
MODEL_SAVE_PATH = '/media/pingan_ai/dxq/gen_blessing/new_model/'
N_GPU = 2
MODEL_NAME = "blessing.ckpt"
EPOCHS = 100
LEARNING_RATE_DECAY = 0.99
filename = blessing_file.split('/')[-1].split('.')[0]
# print(blessing_file)
can_count = 0
MOVING_AVERAGE_DECAY = 0.99
def HasReapeatWord(string):
flag = False
for i,char in enumerate(string):
# print i
s = i
m = i+1
e = i+2
# print string[s],string[m],string[e]
if flag:
return True
elif e >= (len(string)-1):
return False
else:
if string[s] == string[m] and string[m] == string[e]:
# print string[s],string[m],string[e]
flag = True
else:
continue
def IsCantonese(line):
for i, patten in enumerate(cantonese):
if patten.search(line)!= None:
# print(line)
# can_count = can_count+1
return True
return False
with open(blessing_file, "r") as f:
for i,line in enumerate(f):
if i == 0:
continue
# try:
# print(line)
line = line.decode('UTF-8')
line = line.strip(u'\n')
line = line.replace(u' ',u'')
if u'_' in line or u'(' in line or u'(' in line or u'《' in line or u'[' in line:
continue
if len(line) < minlen or len(line) > maxlen:
continue
if IsCantonese(line):
can_count = can_count+1
continue
if HasReapeatWord(line):
continue
all_words += [word for word in line]
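        # wrap each line as u'[' + length-token + line + u']'; chr(len(line)+61) encodes the length ('A' for 4 chars up to 'L' for 15)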
line = u'[' + unicode(chr(len(line)+61)) +line + u']'
blessings.append(line)
# except Exception as e:
# print('no')
if i%100000== 0:
print(u'处理到%d'%i)
blessings = sorted(blessings,key=lambda line: len(line))
print(u'歌词总行数: %s'% len(blessings))
print(can_count)
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
print('*******************')
words, _ = zip(*count_pairs)
print(len(words))
for i in range(65,66+maxlen-minlen):
    words += (unicode(chr(i)),)
words += (u'[',)
words += (u']',)
words += (u' ',)
print(u'词表总数: %s'% len(words))
word_num_map = dict(zip(words, range(len(words))))
print(word_num_map[u'['])
print(word_num_map[u']'])
print(word_num_map[u' '])
print(word_num_map[u'A'])
print(word_num_map[u'L'])
to_num = lambda word: word_num_map.get(word, len(words)-1)
blessings_vector = [ list(map(to_num,blessing)) for blessing in blessings]
to_words = lambda num: words[num]
print(blessings_vector[-4:-1])
print(blessings_vector[1])
for i in blessings[-4:-1]:
print(i)
print(blessings[1])
with open(filename+'2id_re.json','w') as outfile:
json.dump(word_num_map,outfile,ensure_ascii=False)
# outfile.write('\n')
with open(filename+'2word_re.json','w') as outfile2:
# word2id = dict((value, key) for key,value in word_num_map.iteritems())
json.dump(words,outfile2,ensure_ascii=False)
# outfile2.write('\n')
batch_size = 256
n_chunk = len(blessings_vector) // batch_size
# sys.exit()
class DataSet(object):
def __init__(self,data_size):
self._data_size = data_size
self._epochs_completed = 0
self._index_in_epoch = 0
self._data_index = np.arange(data_size)
def next_batch(self,batch_size):
start = self._index_in_epoch
if start + batch_size > self._data_size:
np.random.shuffle(self._data_index)
self._epochs_completed = self._epochs_completed + 1
self._index_in_epoch = batch_size
full_batch_features ,full_batch_labels = self.data_batch(0,batch_size)
return full_batch_features ,full_batch_labels
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
full_batch_features ,full_batch_labels = self.data_batch(start,end)
if self._index_in_epoch == self._data_size:
self._index_in_epoch = 0
self._epochs_completed = self._epochs_completed + 1
np.random.shuffle(self._data_index)
return full_batch_features,full_batch_labels
def data_batch(self,start,end):
batches = []
for i in range(start,end):
batches.append(blessings_vector[self._data_index[i]])
length = max(map(len,batches))
# print(word_num_map[' '])
xdata = np.full((end - start,length), word_num_map[']'], np.int32)
for row in range(end - start):
xdata[row,:len(batches[row])] = batches[row]
ydata = np.copy(xdata)
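        # targets are the inputs shifted left by one position: each token predicts the next one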
ydata[:,:-1] = xdata[:,1:]
return xdata,ydata
#---------------------------------------RNN--------------------------------------#
# Define the RNN
def neural_network(input_data,model='lstm', rnn_size=128, num_layers=2):
if model == 'rnn':
cell_fun = tf.nn.rnn_cell.BasicRNNCell
elif model == 'gru':
cell_fun = tf.nn.rnn_cell.GRUCell
elif model == 'lstm':
cell_fun = tf.nn.rnn_cell.BasicLSTMCell
cell = cell_fun(rnn_size, state_is_tuple=True)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers, state_is_tuple=True)
initial_state = cell.zero_state(batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)])
softmax_b = tf.get_variable("softmax_b", [len(words)])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [len(words), rnn_size])
inputs = tf.nn.embedding_lookup(embedding, input_data)
outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
output = tf.reshape(outputs,[-1, rnn_size])
logits = tf.matmul(output, softmax_w) + softmax_b
probs = tf.nn.softmax(logits)
return logits, last_state, probs, cell, initial_state
def load_model(sess, saver,ckpt_path):
latest_ckpt = tf.train.latest_checkpoint(ckpt_path)
if latest_ckpt:
print ('resume from', latest_ckpt)
saver.restore(sess, latest_ckpt)
return int(latest_ckpt[latest_ckpt.rindex('-') + 1:])
else:
print ('building model from scratch')
sess.run(tf.global_variables_initializer())
return -1
def to_word(weights):
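    # greedy decoding: pick the word with the highest probability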
sample = np.argmax(weights)
return words[sample]
def train_to_word(x):
    # print(u'length of x', len(x))
x_words = map(to_words, x)
# print(str(x_words).decode("unicode-escape"))
outstr = ''.join(x_words)
token = outstr[1]
outstr = outstr[2:-1]
print(u'[ '+ token +u' '+ outstr+u' ]')
def AlignSentence(sentence):
sentence = sentence[:-2]
sentence_re = ''
for i in range(len(sentence)):
if not (sentence[i] >= u'\u4e00' and sentence[i]<=u'\u9fa5'):
sentence_re += sentence[i]+u' '
else:
sentence_re += sentence[i]
# return u'[ '+sentence[i] + u' ]'
print sentence_re + u' ]'
def get_loss(input_data, targets, reuse_variables=None):
    # Reuse the function defined in section 5.5 to compute the forward-pass result of the neural network.
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables):
logits, last_state, probs, _, _ = neural_network(input_data)
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[targets],
[tf.ones_like(targets, dtype=tf.float32)],
len(words)
)
cost = tf.reduce_mean(loss)
return cost
# Compute the average of each variable's gradients.
def average_gradients(tower_grads):
average_grads = []
    # Enumerate all variables together with the gradients computed for them on the different GPUs.
    for grad_and_vars in zip(*tower_grads):
        # Average this variable's gradients over all GPUs.
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
        # Pair the variable with its averaged gradient.
average_grads.append(grad_and_var)
    # Return the averaged gradients of all variables; they will be used for the variable updates.
return average_grads
def main(argv=None):
    # Keep the simple operations on the CPU; only the network's training process runs on the GPUs.
TRAINING_STEPS = EPOCHS*n_chunk/N_GPU
with tf.Graph().as_default(), tf.device('/cpu:0'):
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])
trainds = DataSet(len(blessings_vector))
targets = tf.reshape(output_targets, [-1])
global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE, global_step, 60000 / batch_size, LEARNING_RATE_DECAY)
optimizer = tf.train.AdamOptimizer(learning_rate)
tower_grads = []
reuse_variables = False
        # Run the network's optimization process on the different GPUs.
        for i in range(N_GPU):
            # Pin this optimization pass to one GPU.
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('GPU_%d' % i) as scope:
                    cur_loss = get_loss(input_data,targets,reuse_variables)
                    # After the variables have been declared once, set the reuse flag to True
                    # so that the different GPUs update one shared set of parameters.
                    reuse_variables = True
                    grads = optimizer.compute_gradients(cur_loss)
                    tower_grads.append(grads)
        # Compute the averaged gradients of the variables.
        grads = average_gradients(tower_grads)
for grad, var in grads:
if grad is not None:
tf.summary.histogram('gradients_on_average/%s' % var.op.name, grad)
        # Update the parameters with the averaged gradients.
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
        # Compute the moving averages of the variables.
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variables_to_average = (tf.trainable_variables() +tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
        # Each iteration has to update both the variable values and their moving averages.
train_op = tf.group(apply_gradient_op, variables_averages_op)
saver = tf.train.Saver()
summary_op = tf.summary.merge_all()
init = tf.global_variables_initializer()
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=True)) as sess:
            # Initialize all variables and start the queues.
init.run()
summary_writer = tf.summary.FileWriter(MODEL_SAVE_PATH, sess.graph)
for step in range(TRAINING_STEPS):
                # Run the training op and record its wall time.
start_time = time.time()
x,y = trainds.next_batch(batch_size)
_, loss_value = sess.run([train_op, cur_loss],feed_dict={input_data: x, output_targets: y})
duration = time.time() - start_time
                # Periodically report the current training progress and measure the training speed.
                if step != 0 and step % 10 == 0:
                    # Count the training examples consumed. Each time the training op runs,
                    # every GPU consumes one batch of data, so the total number of examples
                    # used is batch size x number of GPUs.
                    num_examples_per_step = batch_size * N_GPU
                    # num_examples_per_step is the number of examples used in this iteration and
                    # duration is the wall time of the current step, so the average number of
                    # examples processed per second is num_examples_per_step / duration.
                    examples_per_sec = num_examples_per_step / duration
                    # Since each GPU consumes one batch per step, the training time for a
                    # single batch is duration / number of GPUs.
                    sec_per_batch = duration / N_GPU
                    # Print the training info.
                    format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
                    print (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))
                # Visualize the training process with TensorBoard.
summary = sess.run(summary_op)
summary_writer.add_summary(summary, step)
                # Periodically save the current model.
if step == n_chunk:
checkpoint_path = os.path.join(MODEL_SAVE_PATH, MODEL_NAME)
saver.save(sess, checkpoint_path, global_step=step)
main()
# if __name__ == '__main__':
# tf.app.run()
|
[
"407383787@qq.com"
] |
407383787@qq.com
|
1eb08df1e69d0570a4b551015f6243b3accb3169
|
e88106f6223882f5d5e7eee23e33490f33fe50f0
|
/db_create.py
|
8ba7d15940ac63c8b2de3128b59007a65095a800
|
[] |
no_license
|
canonhui/VacHeure
|
4fc3b2d3f9ca8c69e423d2dfed6bd360975b1109
|
90f11882a94336e585c01812ef6d0f800b5d1493
|
refs/heads/master
| 2020-03-28T20:31:00.846404
| 2017-07-26T16:12:45
| 2017-07-26T16:12:45
| 94,612,924
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
#!/usr/bin/env python3
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
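# Put the database under SQLAlchemy-Migrate version control: create the repository on
# first run, otherwise register the repository's current version.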
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
|
[
"ptt2hui@gmail.com"
] |
ptt2hui@gmail.com
|
b793d9f4e13c712ddcf0d002de824cf6639c73c1
|
8ea4ca8746e9080b9522c6244807d42234260034
|
/web2pyApp/miFacebook/languages/es.py
|
cfe6e458d6797510d1fda9db26f6dd973738b5ff
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
bhandaribhumin/daw2
|
a59d4f1f64785bbbef55d2f4ca77edb02dce4578
|
480597ef2131853f7c0c4c61b4334257d12aef28
|
refs/heads/master
| 2020-12-31T05:39:35.951295
| 2014-06-18T11:36:45
| 2014-06-18T11:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,615
|
py
|
# coding: utf8
{
'!langcode!': 'es',
'!langname!': 'Español',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN',
'%s %%{row} deleted': '%s filas eliminadas',
'%s %%{row} updated': '%s filas actualizadas',
'%s selected': '%s seleccionado(s)',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(something like "it-it")': '(algo como "it-it")',
'A new version of web2py is available': 'Hay una nueva versión de web2py disponible',
'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s',
'About': 'Acerca de',
'about': 'acerca de',
'About application': 'Acerca de la aplicación',
'Access Control': 'Access Control',
'additional code for your application': 'código adicional para su aplicación',
'admin disabled because no admin password': ' por falta de contraseña',
'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE',
'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña',
'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro',
'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro',
'Administrative interface': 'Interfaz administrativa',
'Administrative Interface': 'Administrative Interface',
'Administrator Password:': 'Contraseña del Administrador:',
'Ajax Recipes': 'Ajax Recipes',
'and rename it (required):': 'y renombrela (requerido):',
'and rename it:': ' y renombrelo:',
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin deshabilitado, el canal no es seguro',
'application "%s" uninstalled': 'aplicación "%s" desinstalada',
'application compiled': 'aplicación compilada',
'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada',
'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"',
'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.',
'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que se ejecuta!',
'Authentication': 'Autenticación',
'Available Databases and Tables': 'Bases de datos y tablas disponibles',
'Buy this book': 'Buy this book',
'Cache': 'Cache',
'cache': 'cache',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, errores y sesiones eliminados',
'Cannot be empty': 'No puede estar vacío',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.',
'cannot create file': 'no es posible crear archivo',
'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"',
'Change Password': 'Cambie Contraseña',
'change password': 'cambie contraseña',
'check all': 'marcar todos',
'Check to delete': 'Marque para eliminar',
'Check to delete:': 'Check to delete:',
'clean': 'limpiar',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'click to check for upgrades': 'haga clic para buscar actualizaciones',
'Client IP': 'IP del Cliente',
'Community': 'Community',
'compile': 'compilar',
'compiled application removed': 'aplicación compilada removida',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controlador',
'Controllers': 'Controladores',
'controllers': 'controladores',
'Copyright': 'Derechos de autor',
'create file with filename:': 'cree archivo con nombre:',
'Create new application': 'Cree una nueva aplicación',
'create new application:': 'nombre de la nueva aplicación:',
'Created By': 'Created By',
'Created On': 'Created On',
'crontab': 'crontab',
'Current request': 'Solicitud en curso',
'Current response': 'Respuesta en curso',
'Current session': 'Sesión en curso',
'currently saved or': 'actualmente guardado o',
'customize me!': 'Adaptame!',
'data uploaded': 'datos subidos',
'Database': 'base de datos',
'Database %s select': 'selección en base de datos %s',
'database administration': 'administración base de datos',
'Date and Time': 'Fecha y Hora',
'db': 'db',
'DB Model': 'Modelo "db"',
'defines tables': 'define tablas',
'Delete': 'Elimine',
'delete': 'eliminar',
'delete all checked': 'eliminar marcados',
'Delete:': 'Elimine:',
'Demo': 'Demo',
'Deploy on Google App Engine': 'Instale en Google App Engine',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Descripción',
'design': 'modificar',
'DESIGN': 'DISEÑO',
'Design for': 'Diseño para',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentación',
"Don't know what to do?": "Don't know what to do?",
'done!': 'listo!',
'Download': 'Download',
'E-mail': 'Correo electrónico',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit application': 'Editar aplicación',
'edit controller': 'editar controlador',
'Edit current record': 'Edite el registro actual',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Edite esta App',
'Editing file': 'Editando archivo',
'Editing file "%s"': 'Editando archivo "%s"',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Error logs for "%(app)s"': 'Bitácora de errores en "%(app)s"',
'errors': 'errores',
'Errors': 'Errors',
'export as csv file': 'exportar como archivo CSV',
'exposes': 'expone',
'extends': 'extiende',
'failed to reload module': 'recarga del módulo ha fallado',
'FAQ': 'FAQ',
'file "%(filename)s" created': 'archivo "%(filename)s" creado',
'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado',
'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido',
'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado',
'file "%s" of %s restored': 'archivo "%s" de %s restaurado',
'file changed on disk': 'archivo modificado en el disco',
'file does not exist': 'archivo no existe',
'file saved on %(time)s': 'archivo guardado %(time)s',
'file saved on %s': 'archivo guardado %s',
'First name': 'Nombre',
'Forgot username?': 'Forgot username?',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Friends': 'Amigos',
'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].',
'Group %(group_id)s created': 'Group %(group_id)s created',
'Group ID': 'ID de Grupo',
'Group uniquely assigned to user %(id)s': 'Group uniquely assigned to user %(id)s',
'Groups': 'Groups',
'Hello World': 'Hola Mundo',
'help': 'ayuda',
'Home': 'Inicio',
'How did you get here?': 'How did you get here?',
'htmledit': 'htmledit',
'import': 'import',
'Import/Export': 'Importar/Exportar',
'includes': 'incluye',
'Index': 'Indice',
'Inicio de sesión': 'Inicio de sesión',
'insert new': 'inserte nuevo',
'insert new %s': 'inserte nuevo %s',
'Installed applications': 'Aplicaciones instaladas',
'internal error': 'error interno',
'Internal State': 'Estado Interno',
'Introduction': 'Introduction',
'Invalid action': 'Acción inválida',
'Invalid email': 'Correo inválido',
'Invalid login': 'Invalid login',
'invalid password': 'contraseña inválida',
'Invalid Query': 'Consulta inválida',
'invalid request': 'solicitud inválida',
'invalid ticket': 'tiquete inválido',
'Is Active': 'Is Active',
'Key': 'Key',
'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado',
'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados',
'languages': 'lenguajes',
'Languages': 'Lenguajes',
'languages updated': 'lenguajes actualizados',
'Last name': 'Apellido',
'Last saved on:': 'Guardado en:',
'Layout': 'Diseño de página',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'License for': 'Licencia para',
'Live Chat': 'Live Chat',
'loading...': 'cargando...',
'Logged in': 'Logged in',
'Logged out': 'Logged out',
'Login': 'Inicio de sesión',
'login': 'inicio de sesión',
'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa',
'logout': 'fin de sesión',
'Logout': 'Fin de sesión',
'Lost Password': 'Contraseña perdida',
'Lost password?': 'Lost password?',
'lost password?': '¿olvido la contraseña?',
'Main Menu': 'Menú principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Modelo "menu"',
'merge': 'combinar',
'Messages': 'Mensajes',
'Models': 'Modelos',
'models': 'modelos',
'Modified By': 'Modified By',
'Modified On': 'Modified On',
'Modules': 'Módulos',
'modules': 'módulos',
'My Sites': 'My Sites',
'Name': 'Nombre',
'new application "%s" created': 'nueva aplicación "%s" creada',
'New Record': 'Registro nuevo',
'new record inserted': 'nuevo registro insertado',
'next 100 rows': '100 filas siguientes',
'NO': 'NO',
'No databases in this application': 'No hay bases de datos en esta aplicación',
'Object or table name': 'Object or table name',
'Online examples': 'Ejemplos en línea',
'or import from csv file': 'o importar desde archivo CSV',
'or provide application url:': 'o provea URL de la aplicación:',
'Origin': 'Origen',
'Original/Translation': 'Original/Traducción',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'pack all': 'empaquetar todo',
'pack compiled': 'empaquete compiladas',
'Password': 'Contraseña',
"Password fields don't match": "Password fields don't match",
'Peeking at file': 'Visualizando archivo',
'please input your password again': 'please input your password again',
'Plugins': 'Plugins',
'Powered by': 'Este sitio usa',
'Preface': 'Preface',
'previous 100 rows': '100 filas anteriores',
'Profile': 'Perfil',
'Profile updated': 'Perfil actualizado',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'registro',
'Record %(id)s created': 'Record %(id)s created',
'Record Created': 'Record Created',
'record does not exist': 'el registro no existe',
'Record ID': 'ID de Registro',
'Record id': 'id de registro',
'register': 'registrese',
'Register': 'Registrese',
'Registration identifier': 'Registration identifier',
'Registration key': 'Contraseña de Registro',
'Registration successful': 'Registration successful',
'Registrese': 'Registrese',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'remove compiled': 'eliminar compiladas',
'request friendship': 'Petición de amistad',
'Reset Password key': 'Reset Password key',
'Resolve Conflict file': 'archivo Resolución de Conflicto',
'restore': 'restaurar',
'revert': 'revertir',
'Role': 'Rol',
'Rows in Table': 'Filas en la tabla',
'Rows selected': 'Filas seleccionadas',
'save': 'guardar',
'Save profile': 'Save profile',
'Saved file hash:': 'Hash del archivo guardado:',
'Search': 'Buscar',
'Search for friends': 'Search for friends',
'Semantic': 'Semantic',
'Services': 'Services',
'session expired': 'sesión expirada',
'shell': 'shell',
'site': 'sitio',
'Size of cache:': 'Size of cache:',
'some files could not be removed': 'algunos archivos no pudieron ser removidos',
'state': 'estado',
'static': 'estáticos',
'Static files': 'Archivos estáticos',
'Statistics': 'Statistics',
'Stylesheet': 'Hoja de estilo',
'Submit': 'Submit',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?',
'Table': 'tabla',
'Table name': 'Nombre de la tabla',
'test': 'probar',
'Testing application': 'Probando aplicación',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador',
'The Core': 'The Core',
'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos',
'The output of the file is a dictionary that was rendered by the view %s': 'La salida del archivo es un diccionario escenificado por la vista %s',
'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas',
'The Views': 'The Views',
'There are no controllers': 'No hay controladores',
'There are no models': 'No hay modelos',
'There are no modules': 'No hay módulos',
'There are no static files': 'No hay archivos estáticos',
'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado',
'There are no views': 'No hay vistas',
'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí',
'This App': 'This App',
'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje',
'This is the %(filename)s template': 'Esta es la plantilla %(filename)s',
'Ticket': 'Tiquete',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'to previous version.': 'a la versión previa.',
'translation strings for the application': 'cadenas de caracteres de traducción para la aplicación',
'try': 'intente',
'try something like': 'intente algo como',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones',
'unable to create application "%s"': 'no es posible crear la aplicación "%s"',
'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"',
'Unable to download': 'No es posible la descarga',
'Unable to download app': 'No es posible descarga la aplicación',
'unable to parse csv file': 'no es posible analizar el archivo CSV',
'unable to uninstall "%s"': 'no es posible instalar "%s"',
'uncheck all': 'desmarcar todos',
'uninstall': 'desinstalar',
'update': 'actualizar',
'update all languages': 'actualizar todos los lenguajes',
'Update:': 'Actualice:',
'upload application:': 'subir aplicación:',
'Upload existing application': 'Suba esta aplicación',
'upload file:': 'suba archivo:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) para NOT, para crear consultas más complejas.',
'User %(id)s Logged-in': 'User %(id)s Logged-in',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'User %(id)s Registered',
'User ID': 'ID de Usuario',
'value already in database or empty': 'value already in database or empty',
'Verify Password': 'Verify Password',
'versioning': 'versiones',
'Videos': 'Videos',
'View': 'Vista',
'view': 'vista',
'Views': 'Vistas',
'views': 'vistas',
'Wall': 'Muro',
'web2py is up to date': 'web2py está actualizado',
'web2py Recent Tweets': 'Tweets Recientes de web2py',
'Welcome': 'Bienvenido',
'Welcome %s': 'Bienvenido %s',
'Welcome to web2py': 'Bienvenido a web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'La cual llamó la función %s localizada en el archivo %s',
'YES': 'SI',
'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente',
'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades',
'You visited the url %s': 'Usted visitó la url %s',
}
|
[
"albertogonzcat@MacBook-Pro-de-Alberto.local"
] |
albertogonzcat@MacBook-Pro-de-Alberto.local
|
e4d255fd819e5b3b88396a6700608eb801008567
|
c0dafd8d9306af9e94084b2fedbe75f9d6069af1
|
/popcorn/rpc/pyro.py
|
f356b018201aa0ad1ec2ab2d7bab0ba303c4c27a
|
[] |
no_license
|
demien/popcorn
|
aa573e4c57bda5b990bd1a6d5d589f8e6e7f690f
|
d866dd818c641a377abc9c55fb4fb181d52ac4d5
|
refs/heads/master
| 2021-06-09T12:30:00.973657
| 2016-10-26T08:25:36
| 2016-10-26T08:25:36
| 60,506,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,930
|
py
|
import copy
import Pyro4
import socket
import threading
from celery import bootsteps
from popcorn.rpc import DISPATHCER_SERVER_OBJ_ID, GUARD_PORT, HUB_PORT
from popcorn.rpc.base import BaseRPCServer, BaseRPCClient, RPCDispatcher
from popcorn.utils import get_log_obj, get_pid
debug, info, warn, error, critical = get_log_obj(__name__)
__all__ = ['PyroServer', 'PyroClient']
DEFAULT_SERIALIZER = 'pickle'
DEFAULT_SERVERTYPE = 'multiplex'
# DEFAULT_SERVERTYPE = 'thread'
class PyroBase(object):
def __init__(self, **kwargs):
Pyro4.config.SERVERTYPE = DEFAULT_SERVERTYPE
Pyro4.config.SERIALIZER = DEFAULT_SERIALIZER
class PyroServer(BaseRPCServer, PyroBase):
def __init__(self, port):
PyroBase.__init__(self)
self.port = port
self.daemon = None
self.thread = None
@property
def ip(self):
host = socket.gethostname()
return socket.gethostbyname(host)
@property
def alive(self):
if self.thread is not None and self.thread.is_alive():
return True
return False
def start_daemon(self):
if self.daemon is None or self.daemon.transportServer is None:
self.daemon = Pyro4.Daemon(host=self.ip, port=self.port) # init a Pyro daemon
def start(self):
"""
Start a pyro server
Fire a new thread for the server daemon loop.
        This method blocks until the server daemon loop is ready.
"""
self.start_daemon()
uri = self.register(RPCDispatcher, DISPATHCER_SERVER_OBJ_ID)
thread = threading.Thread(target=self.daemon.requestLoop)
thread.daemon = True
thread.start()
while not thread.is_alive():
continue
self.thread = thread
info('[RPC Server] - [Start] - %s.' % uri)
def stop(self):
"""
Stop the pyro server
        Notice: the step order matters and must not be changed.
        Step 1: stop the daemon loop
        Step 2: stop the socket server
        Step 3: unregister the dispatcher class
"""
self.daemon.shutdown()
if self.thread is not None and self.thread.is_alive():
while self.thread.is_alive():
continue
self.daemon.close()
self.unregister(RPCDispatcher)
info('[RPC Server] - [Shutdown] - exit daemon loop')
def register(self, obj, obj_id=None):
"""
Register the obj to the server.
"""
try:
            return self.daemon.register(obj, obj_id, force=True)  # register the obj under the given obj id
except Exception as e:
return self.daemon.uriFor(obj_id)
def unregister(self, obj):
"""
Unregister the obj from the server.
        Ignored when unregistering an obj that does not exist.
"""
try:
return self.daemon.unregister(obj)
except Exception as e:
pass # don't care for multi unregister
class PyroClient(BaseRPCClient, PyroBase):
def __init__(self, server_ip, server_port):
PyroBase.__init__(self)
dispatcher_uri = self.get_uri(DISPATHCER_SERVER_OBJ_ID, server_ip, server_port)
self.default_proxy = self.get_proxy_obj(dispatcher_uri) # get local proxy obj
def call(self, path, *args, **kwargs):
"""
Call a remote obj or class.
:param str path: the path of the callable obj. A valid one: popcorn.apps.hub:Hub.scan.
More detail of path please check popcorn.utils.imports.symbol_by_name
"""
try:
return self.default_proxy.dispatch(path, *args, **kwargs)
except Exception as e:
error('[RPC Client] - [Call] - [Error]: %s, %s' % (e.message, path))
def get_proxy_obj(self, uri):
return Pyro4.Proxy(uri)
def get_uri(self, obj_id, server_ip, port):
return 'PYRO:%s@%s:%s' % (str(obj_id), str(server_ip), str(port))
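# Hedged usage sketch (assumption, not part of the original module): wiring the
# two classes together. HUB_PORT comes from the imports above; the dispatch path
# is the one quoted in PyroClient.call's docstring.
#   server = PyroServer(HUB_PORT)
#   server.start()
#   client = PyroClient(server.ip, HUB_PORT)
#   client.call('popcorn.apps.hub:Hub.scan')
#   server.stop()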
|
[
"demien@appannie.com"
] |
demien@appannie.com
|
880859f1a8ae3ed7dbb4337a7cad06f3487bf0a6
|
f83b7e61f54d885faf2414187a6fbd8ebbbba543
|
/lectures/cs532-s19/assignments/A6/correlation.py
|
ad8663da019d7e24bd90a1c80d3715c7f488c09d
|
[] |
no_license
|
bdeme004/anwala.github.io
|
0c2fd7ec79c32b0f524874d8ff5ede1b84b80b10
|
ccbe10a516855cf7d1f635d93e4c4a0c6f4c4326
|
refs/heads/master
| 2020-04-20T04:51:45.788646
| 2019-05-01T03:14:05
| 2019-05-01T03:14:05
| 168,640,426
| 0
| 0
| null | 2019-02-01T04:07:43
| 2019-02-01T04:07:43
| null |
UTF-8
|
Python
| false
| false
| 1,446
|
py
|
import recommendations as recs
import BJDfunctions as fn
PROXY = 477 #712
PEOPLE = [471, 280, 373, 642, 330, 450, 541, 577, 864, 43, 805, 313, 504, 254, 94, 399,
5, 92, 381, 716, 49, 1, 843, 222]
def topAndBottom(dataset):
for item in fn.topValues(dataset):
fn.printFail(item)
print("\n")
for item in fn.bottomValues(dataset):
fn.printFail(item)
print("\n")
ratings = fn.load_data()
item_mode = recs.transformPrefs(ratings)
fn.printFail(recs.sim_pearson(item_mode, 'Star Wars (1977)', 'Grease (1978)'))
fn.printFail(recs.sim_pearson(item_mode, 'Star Wars (1977)', 'While You Were Sleeping (1995)'))
fn.printFail(recs.sim_pearson(item_mode, 'Star Wars (1977)', 'Sleepless in Seattle (1993)'))
cats = item_mode['Wallace & Gromit: The Best of Aardman Animation (1996)']
gun = item_mode['Grease (1978)']
dogs = item_mode['While You Were Sleeping (1995)']
cats_dogs = list()
cats_gun = list()
corr = 0
print("\n")
for item in dogs:
if item in cats:
cats_dogs.append((item, cats[item], dogs[item]))
if item in gun:
cats_gun.append((item, dogs[item], gun[item]))
for item in cats_dogs:
print(item)
print("\n")
for item in cats_gun:
print(item)
print("\n")
print(corr)
print("len cats:")
print(len(cats))
print("len dogs:")
print(len(dogs))
print("len gun:")
print(len(gun))
print("len dogs/cats:")
print(len(cats_dogs))
print("len dogs/gun:")
print(len(cats_gun))
|
[
"43201288+bdeme004@users.noreply.github.com"
] |
43201288+bdeme004@users.noreply.github.com
|
3e74af071c20b8fe4f410c52f75be3b2e4848392
|
1fa5805dc15ad2529d1b343d4fd5a4205dcc4701
|
/modules.py
|
3cb06a1155d2337dadcfc620dd5217b5cf6ea340
|
[] |
no_license
|
readbeard/activation_fn
|
527c91246df7ace5b427f6ab9aba4c78fc001a29
|
fd42647c7c9bfa041e4db0ce723cd61c64a1ad04
|
refs/heads/master
| 2022-09-27T05:03:51.900709
| 2020-06-09T17:38:57
| 2020-06-09T17:38:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,017
|
py
|
import torch
import torch.nn as nn
class Antirelu(nn.Module):
def __init__(self):
super(Antirelu, self).__init__()
def forward(self, s):
return torch.min(torch.zeros(s.shape), s)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, s):
return s
class MLP(nn.Module):
def __init__(self, combinator):
super(MLP, self).__init__()
if combinator == 'MLP1': # 104202 parameters
self.mlp = torch.nn.Sequential(nn.Linear(4, 3),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(3, 1),
)
if combinator == 'MLP1_neg': # 104202 parameters
self.mlp = torch.nn.Sequential(nn.Linear(8, 5),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(5, 1),
)
if combinator == 'MLP2': # 104970
self.mlp = torch.nn.Sequential(nn.Linear(4, 4),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(4, 1),
)
if combinator == 'MLP3': # 104202 parameters --> same of MLP1 but w/out dropout
self.mlp = torch.nn.Sequential(nn.Linear(4, 3),
nn.ReLU(),
nn.Linear(3, 1),
)
if combinator == 'MLP4': # 104970 --> same of MLP1 but w/out dropout
self.mlp = torch.nn.Sequential(nn.Linear(4, 4),
nn.ReLU(),
nn.Linear(4, 1),
)
if combinator == 'MLP5': # 105098
self.mlp = torch.nn.Sequential(nn.Linear(4, 3),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(3, 2),
nn.ReLU(),
nn.Linear(2, 1),
)
def forward(self, x):
x = self.mlp(x)
return x
class MLP_ATT(nn.Module):
def __init__(self, combinator):
super(MLP_ATT, self).__init__()
        self.combinator = combinator  # needed by forward() for every variant
        if combinator in ['MLP_ATT', 'MLP_ATT_b']:  # 105738 parameters
if combinator == 'MLP_ATT_b':
self.beta = nn.Parameter(torch.FloatTensor(4).uniform_(-0.5, 0.5))
self.mlp = torch.nn.Sequential(nn.Linear(4, 3),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(3, 4),
)
if combinator in ['MLP_ATT_neg']: # 105738 parameters
self.mlp = torch.nn.Sequential(nn.Linear(8, 5),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(5, 8),
)
if combinator == 'MLP_ATT2': # 106890 parameters
self.mlp = torch.nn.Sequential(nn.Linear(4, 4),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(4, 4),
)
def forward(self, x):
if self.combinator == 'MLP_ATT_b':
x = x + self.beta
# print(x.shape)
x = self.mlp(x)
return x
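# Hedged usage sketch (not part of the original file): exercising one of the
# combinator MLPs above on a random batch; input width 4 matches the
# Linear(4, 3) first layer of the 'MLP1' variant.
if __name__ == '__main__':
    mlp = MLP('MLP1')
    out = mlp(torch.randn(16, 4))
    print(out.shape)  # torch.Size([16, 1])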
|
[
"bartozzi.1535211@studenti.uniroma1.it"
] |
bartozzi.1535211@studenti.uniroma1.it
|
978f0fbbe7b7a78fae29b9164dc0c70b71df4389
|
1367c7996bb8daff336a1a83d2cbd58413a4837a
|
/TotalAssets/adminx.py
|
03dd72cf2caac5a2d61fa0b92499f602eae055df
|
[] |
no_license
|
HtWr/ITAM
|
31252b0905f76d1b631f0d470a15cd05d36c3aae
|
bc93c6a5e5157147f031b434a7eba37303841022
|
refs/heads/master
| 2020-09-22T17:10:24.466400
| 2019-12-03T03:23:06
| 2019-12-03T03:23:06
| 225,272,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import xadmin
from xadmin import views
class BaseSetting(object):
"""主题配置"""
enable_themes = True
use_bootswatch = True
xadmin.site.register(views.BaseAdminView, BaseSetting)
class GlobalSetting(object):
site_title = 'Local IT'
site_footer = 'CTU-Local IT'
menu_style = 'accordion'
xadmin.site.register(views.CommAdminView, GlobalSetting)
|
[
"27656615+HtWr@users.noreply.github.com"
] |
27656615+HtWr@users.noreply.github.com
|
6b1b084f116c65b5e5fe93e2388539fad7f97c69
|
8038e8005693a777be5deb462635e5ecc2f4d6a0
|
/Scrapper.py
|
7b1a5628e6c9bf6c0449f6e492c9a37e5685cd69
|
[] |
no_license
|
Waruiru/twitter_data_mining
|
56197eecf5326ff40244bfbbea52c5b03463c357
|
ab7acf123cac229042ada8e0214e102f83b91f17
|
refs/heads/master
| 2020-04-25T16:35:47.039580
| 2018-09-12T09:58:36
| 2018-09-12T09:58:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,218
|
py
|
import json
import tweepy
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from flask import Flask, render_template, request, flash, url_for, redirect
app = Flask(__name__)
# consumer key, consumer secret, access token, access secret.
ckey = "6yMsHkLwEtLldyk2MinN8N7Mb"
csecret = "NzFmWgiSWwiF0fK4ic6mnqfaPuUNg471pb2Qcx6aS89z80ho72"
atoken = "1709707117-JibK1EyA7TCS3Hhuzn5rfOBKPSpepkm0jPSFHfP"
asecret = "zae8WJWSXoocsXiCYt8VQ0WJxBYQmP9sbvkXYUGbiYpB0"
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
print "service started!"
class listener(StreamListener):
def on_data(self, data):
print "method for tweet retrieval started!"
import sentiment_mod as s
all_data = json.loads(data)
print "data fetched, saving it to file!"
outputfile = open('raw_data.txt', 'a')
outputfile.write(data)
outputfile.write('\n')
outputfile.close()
print "written to file!"
tweet = all_data["text"]
sentiment_value, confidence = s.sentiment(tweet)
display =(tweet, sentiment_value, confidence)
print display
if confidence * 100 >= 80:
output = open("twitter-out.txt", "a")
output.write(sentiment_value)
output.write('\n')
output.close()
savetweet = open("tweet.txt", "a")
savetweet.write(tweet)
savetweet.write('\n')
savetweet.close()
print "returning result to web!"
return display
return True
def on_error(self, status):
print "error ! ->"
print(status)
@app.route('/', methods=['GET'])
def index():
print "get request received! Processing request"
return render_template("home.html")
@app.route('/post', methods=['post'])
def method():
print "post request!"
search_string = request.form.get('search_string')
print search_string
    twitterStream = Stream(auth, listener())
    twitterStream.filter(track=[search_string])
    # Flask views must return a response; re-render the home page once the
    # (blocking) stream ends
    return render_template("home.html")
if __name__ == '__main__':
app.run()
|
[
"jjswork2@gmail.com"
] |
jjswork2@gmail.com
|
b14a54178a2ef7198f315817ce57ac15eb5c81a3
|
1b47a013b4f1ef0d5699c7b94528bc0cf8d96f66
|
/readLog/interview_import/read.py
|
58a8d583645fd791b58bd12c27aa9b348e4b1662
|
[] |
no_license
|
skysunwei/pyworks
|
2a476f06e4e3cd29d7e56610858e2805d1bf0bfc
|
2fb2496d303eed181c3ad1244ff692d2eeecec6a
|
refs/heads/master
| 2022-10-23T01:56:54.764685
| 2020-03-16T07:32:09
| 2020-03-16T07:32:09
| 63,683,067
| 2
| 0
| null | 2022-10-05T22:47:19
| 2016-07-19T10:04:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,401
|
py
|
#-*- coding: UTF-8 -*-
import xlrd
import os
import sys
import time
reload(sys)
sys.setdefaultencoding('utf-8')
root_dir = "excel"
def is_useless_interview(feedback):
    if feedback == '':
return True
if '电话是否接通' in feedback:
if '电话是否接通 : 是' in feedback:
return False
else:
return True
# print feedback
return False
def str_to_timestamp(interview_time):
try:
time_array = time.strptime(interview_time, "%Y年%m月%d日%H:%M:%S")
return int(time.mktime(time_array))
except:
return interview_time
def method_name():
feed_back_collection = []
for parent, dirNames, file_names in os.walk(root_dir):
for file_name in file_names:
excel_file = os.path.join(parent, file_name)
data = xlrd.open_workbook(excel_file)
sheet_names = data.sheet_names()
for i in range(0, len(sheet_names)):
table = data.sheets()[i]
first_row_items = table.row_values(0)
if '电话是否接通' in first_row_items:
if '回访时间' in first_row_items:
column_index_of_time = first_row_items.index('回访时间')
else:
                        print file_name + ', "回访时间" (interview time) column not found.'
for row_item in first_row_items:
print row_item
continue
                    column_str_of_tel = '联系电话'
                    if column_str_of_tel in first_row_items:
                        column_index_of_tel = first_row_items.index(column_str_of_tel)
                    else:
                        print file_name + ', "联系电话" (contact phone) column not found.'
                        for row_item in first_row_items:
                            print row_item
continue
for j in range(1, table.nrows):
interview_time = str(table.cell(j, column_index_of_time).value)
                    if interview_time == '':
continue
                    tel = str(table.cell(j, column_index_of_tel).value)
                    if tel.endswith('.0'):  # str.strip('.0') would also remove genuine trailing zeros
                        tel = tel[:-2]
feed_back = ''
for k in range(column_index_of_time + 1, table.ncols):
feed_back_item = str(table.cell(j, k).value)
                        if feed_back_item.strip(' ') != '':
feed_back += str(table.cell(0, k).value) + ' : ' + feed_back_item + '\n'
if is_useless_interview(feed_back):
continue
one_feedback = {}
one_feedback['tel'] = tel
one_feedback['time'] = str_to_timestamp(interview_time)
one_feedback['content'] = feed_back
feed_back_collection.append(one_feedback)
return feed_back_collection
# str_to_timestamp()
feed_backs = method_name()
for item in feed_backs:
print "insert `customerservice`(`csnote`,`userid`,`startdateline`,`dateline`,`mobile`) values('%s',1,%s,%s,'%s'); "%(
item['content'],\
item['time'],\
item['time'],\
item['tel'])
# print is_useless_interview('电话是否接通 : 是')
|
[
"skysunwei@gmail.com"
] |
skysunwei@gmail.com
|
c8ba7356a356d78dc1fe56f8432722b76b1b8769
|
19e7b93a1bc631f74b7fcf06feeb2574f0a5256a
|
/Guard Game/guard_game.py
|
e9bd7ce874484201bc53c98f3d9b8686d774a0ee
|
[] |
no_license
|
elack33/Google-FooBar
|
0013f2c4ece39d7a903833f57586618acdbd672e
|
f762a9062c37e056436fd7882540346f764a0e95
|
refs/heads/master
| 2021-01-10T20:32:43.848573
| 2015-08-10T06:00:09
| 2015-08-10T06:00:09
| 40,465,625
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
"""
For example, when a guard picks up the medical file for Rabbit #1235,
she would first add those digits together to get 11, then add those together to get 2, her final sum.
Write a function answer(x), which when given a number x,
returns the final digit resulting from performing the above described repeated sum process on x.
"""
def answer(x):
first = str(x)
first_list = []
for each in first:
first_list.append(int(each))
while len(first_list) > 1:
sum_list = sum(first_list)
first_list = []
for each in str(sum_list):
first_list.append(int(each))
return first_list[0]
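# Worked example from the docstring: 1235 -> 1+2+3+5 = 11 -> 1+1 = 2
print(answer(1235))  # prints 2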
|
[
"elack33@gmail.com"
] |
elack33@gmail.com
|
5ca8ba65b037dce3702815601253cbec3d63478d
|
6c9e11f4580175a76123dd49f0f4b190c4e975c4
|
/rango/migrations/0002_auto_20180126_0034.py
|
77537d5f2b2b490d2a1e82e8e59da49d6c0b0830
|
[] |
no_license
|
SeDominykas/tango_with_django_project
|
2a92dc536c1989af39bf929c2b05d45dacc1d23f
|
f0fc42059122bf206458a88eff840019b251cbcb
|
refs/heads/master
| 2021-05-11T13:29:37.359733
| 2018-02-09T17:51:01
| 2018-02-09T17:51:01
| 117,229,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 724
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-26 00:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='category',
options={'verbose_name_plural': 'Categories'},
),
migrations.AddField(
model_name='category',
name='likes',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='category',
name='views',
field=models.IntegerField(default=0),
),
]
|
[
"2262804s@student.gla.ac.uk"
] |
2262804s@student.gla.ac.uk
|
6a76ec73758651caf2df2d1efebcb72d943a0c10
|
346e98e5e2b8cceecbb6507a4601bc5a93827749
|
/rets/parsers/base.py
|
89950a397af07dde6fb66700f66116c9d933f38e
|
[
"MIT"
] |
permissive
|
frozenjava/python-rets
|
1ca8ebd3ae0caf78d54a6cf6868dd91d8da8078e
|
c5f7342b7a3e96d746178d90b11db0f7e1bfdfaa
|
refs/heads/master
| 2020-06-30T04:00:39.173067
| 2016-11-21T17:56:52
| 2016-11-21T17:56:52
| 74,392,000
| 1
| 0
| null | 2016-11-21T18:11:51
| 2016-11-21T18:11:51
| null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
class Base(object):
def __init__(self, session):
self.session = session
@staticmethod
def get_attributes(input_dict):
return {k.lstrip('@'): v for k, v in input_dict.items() if k[0] == '@'}
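# Illustrative usage (not from the original file): only '@'-prefixed keys are
# kept, with the prefix stripped.
if __name__ == '__main__':
    print(Base.get_attributes({'@Code': '0', 'Name': 'x'}))  # {'Code': '0'}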
|
[
"matthew.d.crowson@gmail.com"
] |
matthew.d.crowson@gmail.com
|
d2b4dce53f7011223d3213a2f66577bfd377aac5
|
293f853eebfef51ce44bc1ca1cbe83cc6d757f50
|
/6.地理空间数据的处理/6.4.根据空间位置提取相应参数/6.4.2.根据空间位置提取遥感参数/根据样地点提取特征.py
|
0a4f4a3a002217e9729651eba3b1a3febddcf40a
|
[] |
no_license
|
flyingliang/-Python-
|
9ce9c5898ad940e3014d7a4d5cf74a3c4ba6e5f2
|
fd3326e586b137ecebc2694b394e6c8e06444c48
|
refs/heads/main
| 2023-03-17T14:30:56.411044
| 2021-03-04T02:51:28
| 2021-03-04T02:51:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,437
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 14:31:36 2021
@author: Admin
"""
# Extract texture features at the sample-plot points
from osgeo import gdal
import numpy as np
import pandas as pd
import os
import osr
from pandas import set_option
class change_coordinate():
def __init__(self, dataset):
self.dataset = dataset
def getSRSPair(self,dataset):
        '''
        Get the projected and geographic reference systems of the given dataset
        :param dataset: GDAL geo dataset
        :return: projected reference system and geographic reference system
        '''
prosrs = osr.SpatialReference()
prosrs.ImportFromWkt(self.dataset.GetProjection())
geosrs = prosrs.CloneGeogCS()
return prosrs, geosrs
def lonlat2geo(self, lon, lat):
        '''
        Convert lon/lat coordinates to projected coordinates (the target
        projection is taken from the given dataset)
        :param dataset: GDAL geo dataset
        :param lon: longitude
        :param lat: latitude
        :return: projected coordinates for (lon, lat)
        '''
prosrs, geosrs = self.getSRSPair(self.dataset)
ct = osr.CoordinateTransformation(geosrs, prosrs)
coords = ct.TransformPoint(lon, lat)
return coords[:2]
def geo2imagexy(self, x, y):
        '''
        Convert projected or geographic coordinates to image (row/col)
        coordinates via GDAL's six-parameter geotransform model
        :param dataset: GDAL geo dataset
        :param x: projected or geographic x coordinate
        :param y: projected or geographic y coordinate
        :return: image row/col for the coordinate (x, y)
        '''
trans = self.dataset.GetGeoTransform()
a = np.array([[trans[1], trans[2]], [trans[4], trans[5]]])
b = np.array([x - trans[0], y - trans[3]])
        return np.linalg.solve(a, b)  # solve the 2x2 linear system with numpy.linalg.solve
def lonlat2rowcol(self,lon,lat):
        '''
        Convert lon/lat directly to row/col using the geotransform
        '''
# tp = self.lonlat2geo(lon,lat)
geo = self.dataset.GetGeoTransform()
# row = int((tp[0] -geo[0]) / geo[1]+0.5)
# col = int((tp[1] - geo[3]) /geo[5]+0.5)
row = int((lon -geo[0]) / geo[1]+0.5)
col = int((lat - geo[3]) /geo[5]+0.5)
return row,col
class define_window():
    '''
    :param w window size
    :param center_row row index of the window centre
    :param center_col column index of the window centre
    '''
def __init__(self,w):
self.w = w
def window_upleft_rowcol(self,center_row,center_col):
upleft_row = center_row - (self.w-1)/2
upleft_col = center_col - (self.w-1)/2
return upleft_row,upleft_col
class make_feature_names():
    '''
    Build one feature name per band; returns the list of names
    '''
def __init__(self,dataset):
self.nb = dataset.RasterCount
def feature(self,feature_list):
names = []
for i in range(self.nb):
for j in feature_list:
names.append('{}{}{}'.format(j,'_',i))
return names
if __name__ == '__main__':
    '''
    Put the image and the coordinate table in the same folder
    '''
img_dir = r'./使用数据'
out_path=r'./输出数据'
gdal.AllRegister()
img = gdal.Open(os.path.join(img_dir,'500_b0_win7_texture.tif'))
ds = pd.read_excel(os.path.join(img_dir,'point.xls'))
ns = img.RasterXSize
nl = img.RasterYSize
    run_change_coordinate = change_coordinate(img)  # coordinate-conversion helper
    w = 7  # window size
    run_define_window = define_window(w)  # window helper
    run_make_feature_names = make_feature_names(img)  # feature-name helper
names = [ 'mean_1','variance_1','homogeneity_1','contrast_1','dissimilarity_1','entropy_1','sencond_moment_1','correlation_1',
]
lon,lat = ds.iloc[:,1].values,ds.iloc[:,2].values
    '''
    Output lists:
    :all_out  every feature value inside each window
    :all_mean the mean of each feature inside each window
    :all_std  the standard deviation of each feature inside each window
    '''
all_out = []
all_mean = []
all_std = []
for i in range(len(lon)):
ilon,ilat = lon[i],lat[i]
ix,iy = run_change_coordinate.lonlat2rowcol(ilon,ilat)
        if ix < 0 or ix > ns-1 or iy < 0 or iy > nl-1:
            print('not in the image: ' + str(ds.iat[i, 0]))  # .iat already returns a scalar
            continue  # skip points that fall outside the raster
upleft_x,upleft_y = run_define_window.window_upleft_rowcol(ix,iy)
ref = img.ReadAsArray(int(upleft_x),int(upleft_y),w,w)
if len(ref.shape) == 3:
df = np.zeros((w*w,len(names)))
for j in range(len(names)):
# print(j)
df[:,j] = list(ref[j].flatten())
df = pd.DataFrame(df,columns=names)
else:
df = pd.DataFrame(ref.flatten())
description = df.describe()
df_mean = description.iloc[1,:]
df_std = description.iloc[2,:]
all_out.append(df)
all_mean.append(df_mean)
all_std.append(df_std)
out = pd.concat(all_out)
out_mean = pd.concat(all_mean)
out_std = pd.concat(all_std)
out.to_csv(os.path.join(out_path,'out.csv'))
out_mean.to_csv(os.path.join(out_path,'out_mean.csv'))
out_std.to_csv(os.path.join(out_path,'out_std.csv'))
|
[
"noreply@github.com"
] |
flyingliang.noreply@github.com
|
fefc253d22ba5bb0ef9b94bef1230f18761a0a2b
|
afa456bb3792e433d84684260cdce1dbc6302cde
|
/authors/apps/tests/test_validation.py
|
d124f479a99ca4cf8c7e3e77f3b359a31f4e9213
|
[
"BSD-3-Clause"
] |
permissive
|
andela/ah-backend-poseidon
|
23ac16e9fcdce49f78df04126f9f486b8c39ebd4
|
d2b561e83ed1e9a585853f4a4e2e37805e86c35c
|
refs/heads/develop
| 2022-12-09T07:38:04.843476
| 2019-07-19T13:44:13
| 2019-07-19T13:44:13
| 158,799,017
| 1
| 4
|
BSD-3-Clause
| 2022-12-08T01:19:16
| 2018-11-23T07:55:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,979
|
py
|
from .base import BaseTestCase
from rest_framework import status
from authors.apps.authentication.models import User
from . import (new_user, data2, invalid_email, invalid_password,
short_password, dup_username, user_login)
class AccountTests(BaseTestCase):
"""handles user registration tests"""
def test_new_user_registration(self):
"""check if new user can be registered"""
response = self.register_user(new_user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertIn("token", response.data)
def test_user_login(self):
"""new user can be logged in\
and token returned on successful login"""
self.verify_user(new_user)
response = self.login_user(user_login)
#raise Exception(response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn("token", response.data)
def test_wrong_token_header_prefix(self):
"""invalid prefix header provided"""
self.client.credentials(HTTP_AUTHORIZATION='hgfds ' + 'poiuytfd')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_for_invalid_token(self):
"""validates token"""
self.client.credentials(HTTP_AUTHORIZATION='Token ' + 'yyuug')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_no_token_in_header(self):
"""no token in header"""
self.add_credentials(response='')
response = self.client.get("/api/user/", format="json")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_super_user(self):
"""checks for registration of a super user in the User model"""
user = User.objects.create_superuser(
username='ayebare',
password='sampletestcase')
self.assertIn(str(user), str(user.username))
def test_create_non_user(self):
"""check for registration of a client user in the User model"""
user = User.objects.create_user(
email='m16ayebare@gmail.com',
username='ayebare',
password='sampletestcase')
self.assertIn(str(user), str(user.email))
def test_get_user_details(self):
"""get user details"""
self.user_access()
response = self.client.get('/api/user/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_user_details(self):
"""assert update route for user details is accessed"""
self.user_access()
response = self.client.put('/api/user/', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_invalid_email_message(self):
"""test invalid email provided."""
response = self.register_user(invalid_email)
self.assertIn(response.data["errors"]["email"][0],
'Please enter a valid email in the format xxxx@xxx.xxx')
def test_invalid_password(self):
"""asserts invalid password provided."""
response = self.register_user(invalid_password)
self.assertIn(response.data["errors"]["password"][0],
'Password should be alphanuemric (a-z,A_Z,0-9).')
def test_short_password(self):
"""test short password provided."""
response = self.register_user(short_password)
self.assertIn(response.data["errors"]["password"][0],
'Password should not be less than 8 characters.')
def test_duplicate_username(self):
"user with same username provided exists"""
self.register_user(new_user)
response = self.register_user(dup_username)
self.assertIn(response.data["errors"]["username"][0],
'user with this username already exists.')
|
[
"ephraim.malinga@gmail.com"
] |
ephraim.malinga@gmail.com
|
e3ca333be4767b6ab7c5c0ea2b1a31759a831a05
|
666d8bafaaf48aeece624a8865d49c9f30e65bc1
|
/apps/aboutMe/__init__.py
|
6d76589163141b1586083b1eefd250adfcfbd755
|
[] |
no_license
|
xuqichuang/django-blog
|
9bd5fa91d06e252f830526b61a6e65a1ec1d8591
|
675106eaedd3fa1bca716966a983b5bff5625b37
|
refs/heads/master
| 2020-03-21T11:07:20.620217
| 2018-06-28T15:21:50
| 2018-06-28T15:21:50
| 138,490,561
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
default_app_config = 'aboutMe.apps.AboutMeConfig'
|
[
"xqc1319681561@163.com"
] |
xqc1319681561@163.com
|
ebbdaca854cfae851b618959e42d156c15d04934
|
aa29ebd6f1fa3d955ea6ee6b8c8b007e1fd9d946
|
/E-Bigay-Website/ebigay/migrations/0007_auto_20210623_0326.py
|
26d2daee074a34733354f1e1213bd44b10a31f9e
|
[] |
no_license
|
TristanBachini/E-Bigay
|
e92a0f383a20dc7b0116cc6d1cc27c3d55d9c4c6
|
19a426637356d08b81518cea1cb662638df52156
|
refs/heads/master
| 2023-06-05T08:12:30.527759
| 2021-06-26T02:14:02
| 2021-06-26T02:14:02
| 375,227,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
# Generated by Django 3.1.7 on 2021-06-22 19:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebigay', '0006_ayudadropoff_user'),
]
operations = [
migrations.AlterField(
model_name='region',
name='region',
field=models.CharField(choices=[('NCR', 'National Capital Region'), ('CAR', 'Cordillera Administrative Region'), ('Region I', 'Ilocos Region'), ('Region II', 'Cagayan Valley Region'), ('Region III', 'Central Luzon Region'), ('Region IV-A', 'CALABARZON Region'), ('Region IV-B', 'MIMAROPA Region'), ('Region V', 'Bicol Region'), ('Region VI', 'Western Visayas Region'), ('Region VII', 'Central Visayas Region'), ('Region VIII', 'Eastern Visayas Region'), ('Region IX', 'Zamboanga Peninsula Region'), ('Region X', 'Northern Mindanao Region'), ('Region XI', 'Davao Region'), ('Region XII', 'SOCCSKARGEN Region'), ('Region XIII', 'Caraga Region'), ('BARMM', 'Bangsamoro Autonomous Region in Muslim Mindanao')], max_length=50, null=True),
),
]
|
[
"angelo.villaluz0120@gmail.com"
] |
angelo.villaluz0120@gmail.com
|
17905727ac54867d46aec37379e0799268641f3c
|
84ed426c4193b88f5893984925c31dc770b17dcc
|
/zuoye2/visualize.py
|
2348134b7e5ce5c6f14b0189c0d28e4d20ed17bc
|
[] |
no_license
|
wuhenq/sjwj
|
27941eba4170f5564d43311c488f10e63bbe3857
|
35cb5c5ffcedfe0b5af34d7b9604e2c7424d2571
|
refs/heads/master
| 2020-05-19T10:47:25.245916
| 2019-05-05T04:10:09
| 2019-05-05T04:10:09
| 184,977,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,082
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 19 20:15:20 2019
@author: huashuo
"""
import json
import matplotlib.pyplot as plt
dic1 = {}
sups = []
confs = []
lifts = []
with open('rules.json','r') as f:
lines = f.readlines()
for l in lines:
load_dic = json.loads(l)
X = load_dic['X_set'][0][0]
Y = load_dic['Y_set'][0][0]
sup = load_dic['sup']
conf = load_dic['conf']
lift = load_dic['lift']
sups.append(load_dic['sup'])
confs.append(load_dic['conf'])
lifts.append(load_dic['lift'])
if X not in dic1.keys():
new = dict()
new[Y] = [[sup],[conf]]
dic1[X] = new
else:
if Y not in dic1[X].keys():
dic1[X][Y] = [[sup],[conf]]
else:
dic1[X][Y][0].append(sup)
dic1[X][Y][1].append(conf)
#for k in dic1.keys():
# print(k)
plt.scatter(sups,confs,c=lifts,s=20,cmap='Reds')
plt.xlabel('sup')
plt.ylabel('conf')
cb = plt.colorbar()
cb.set_label('lift')
plt.show()
|
[
"2060143747@qq.com"
] |
2060143747@qq.com
|
5ceea524dce0435014a540829eaf781ee0c61dde
|
45356d379cbfd9f127f9f555116c08fcf855d3ee
|
/imagenet_nn_search.py
|
43b7e24a9933f2284b5377f95baa027022176183
|
[
"MIT"
] |
permissive
|
SimuJenni/Correspondences
|
3d312ed68564eb7abe4138e00c5d71714c8efd24
|
384c0272e438ad3e7c936f5ae78fe6154b188c54
|
refs/heads/master
| 2021-01-20T21:19:43.541738
| 2017-09-18T16:03:59
| 2017-09-18T16:03:59
| 101,764,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
import os
import tensorflow as tf
from Preprocessor import Preprocessor
from train.AlexNet_NN_search_full import CNetTrainer
from datasets.ImageNet import ImageNet
from models.AlexNet_layers_lrelu import AlexNet
from constants import IMAGENET_VAL_DIR
from scipy import misc
import cv2
def load_val_image(class_id, val_dir=IMAGENET_VAL_DIR):
class_folders = os.listdir(val_dir)
img_names = os.listdir(os.path.join(IMAGENET_VAL_DIR, class_folders[class_id]))
img = misc.imread(os.path.join(IMAGENET_VAL_DIR, class_folders[class_id], img_names[0]), mode='RGB')
img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_CUBIC)
return img
target_shape = [227, 227, 3]
model = AlexNet(batch_size=2000)
data = ImageNet()
preprocessor = Preprocessor(target_shape=target_shape)
ckpt = '/Data/Logs/CNet/imagenet_SDNet_res1_default_baseline_finetune_conv_5/model.ckpt-324174'
#ckpt = '/Data/Logs/CNet/imagenet_AlexNet_sorted_alex_sorted_finetune_conv_4/model.ckpt-450360'
#ckpt = '/Data/Logs/CNet/imagenet_AlexNet_sorted2_alex_sorted_finetune_conv_5/model.ckpt-294132'
trainer = CNetTrainer(model=model, dataset=data, pre_processor=preprocessor, num_epochs=1, tag='inv_tv',
lr_policy='linear', optimizer='adam', init_lr=0.0003, end_lr=0.00003)
# trainer.compute_stats(ckpt, 4, model.name)
# imgs: 0, 3, 15, 26, 87, 95, 98, 146, 221, 229, 237, 259, 348, 378, 388, 422
# for i in range(87, 1000):
# print(i)
# img = load_val_image(i)
# misc.imshow(img)
for i in [26]:
trainer = CNetTrainer(model=model, dataset=data, pre_processor=preprocessor, num_epochs=1, tag='inv_tv',
lr_policy='linear', optimizer='adam', init_lr=0.0003, end_lr=0.00003)
trainer.search_nn(load_val_image(i), ckpt, 4, model.name, i)
|
[
"simujenni@gmail.com"
] |
simujenni@gmail.com
|
24956731c50779fcd3a718ec412d02aee3ab9f6e
|
59697ff78b5e5496c8ef1f71243223b3859aacd0
|
/attractions_recc.py
|
51a11fee19962e054e2458e78ff4217d4642c1a7
|
[] |
no_license
|
Arpppit/Collabrative-Filtering
|
8c248c179762af8813e59fa449762d75f7815ae5
|
c9f96eda613d2dd6d85e32eab96f17b31e652b7d
|
refs/heads/main
| 2023-08-29T14:04:00.247099
| 2021-11-05T17:16:37
| 2021-11-05T17:16:37
| 425,028,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,689
|
py
|
import pandas as pd
import numpy as np
# import ipywidgets as w
# from ipywidgets import HBox, VBox
# from ipywidgets import Layout, widgets
# from IPython.display import display, IFrame, HTML
from utils import Util
from rbm import RBM
import math, re, datetime as dt, glob
from urllib.parse import quote
from urllib.request import Request, urlopen
from google_images_download import google_images_download
from PIL import Image
import requests
from bs4 import BeautifulSoup
import html5lib
#from nltk.corpus import wordnet
import nltk
nltk.download('wordnet')
from nltk.corpus import wordnet
def f(row):
avg_cat_rat = dict()
for i in range(len(row['category'])):
if row['category'][i] not in avg_cat_rat:
avg_cat_rat[row['category'][i]] = [row['rating'][i]]
else:
avg_cat_rat[row['category'][i]].append(row['rating'][i])
for key,value in avg_cat_rat.items():
avg_cat_rat[key] = sum(value)/len(value)
return avg_cat_rat
def sim_score(row):
score = 0.0
match = 0
col1 = row['cat_rat']
col2 = row['user_data']
for key, value in col2.items():
if key in col1:
match+=1
score += (value-col1[key])**2
if match != 0:
return ((math.sqrt(score)/match) + (len(col2) - match))
else:
return 100
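# Illustrative example (not from the original file) of the two helpers above:
#   f({'category': ['museum', 'park', 'museum'], 'rating': [4, 5, 2]})
#   -> {'museum': 3.0, 'park': 5.0}
# sim_score then takes an RMSE-style distance between such a per-category
# average dict ('cat_rat') and the user's preferences ('user_data'),
# penalising categories the user rated that the row lacks (100 if no match).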
def get_recc(att_df, cat_rating):
util = Util()
epochs = 50
rows = 40000
alpha = 0.01
H = 128
batch_size = 16
dir= 'etl/'
ratings, attractions = util.read_data(dir)
ratings = util.clean_subset(ratings, rows)
rbm_att, train = util.preprocess(ratings)
num_vis = len(ratings)
rbm = RBM(alpha, H, num_vis)
joined = ratings.set_index('attraction_id').join(attractions[["attraction_id", "category"]].set_index("attraction_id")).reset_index('attraction_id')
grouped = joined.groupby('user_id')
category_df = grouped['category'].apply(list).reset_index()
rating_df = grouped['rating'].apply(list).reset_index()
cat_rat_df = category_df.set_index('user_id').join(rating_df.set_index('user_id'))
cat_rat_df['cat_rat'] = cat_rat_df.apply(f,axis=1)
cat_rat_df = cat_rat_df.reset_index()[['user_id','cat_rat']]
cat_rat_df['user_data'] = [cat_rating for i in range(len(cat_rat_df))]
cat_rat_df['sim_score'] = cat_rat_df.apply(sim_score, axis=1)
user = cat_rat_df.sort_values(['sim_score']).values[0][0]
print("Similar User: {u}".format(u=user))
filename = "e"+str(epochs)+"_r"+str(rows)+"_lr"+str(alpha)+"_hu"+str(H)+"_bs"+str(batch_size)
reco, weights, vb, hb = rbm.load_predict(filename,train,user)
unseen, seen = rbm.calculate_scores(ratings, attractions, reco, user)
rbm.export(unseen, seen, 'recommendations/'+filename, str(user))
return filename, user, rbm_att
def filter_df(filename, user, low, high, province, att_df):
recc_df = pd.read_csv('recommendations/'+filename+'/user{u}_unseen.csv'.format(u=user), index_col=0)
recc_df.columns = ['attraction_id', 'att_name', 'att_cat', 'att_price', 'score']
recommendation = att_df[['attraction_id','name','category','city','latitude','longitude','price','province', 'rating']].set_index('attraction_id').join(recc_df[['attraction_id','score']].set_index('attraction_id'), how="inner").reset_index().sort_values("score",ascending=False)
    filtered = recommendation[(recommendation.province == province) & (recommendation.price >= low) & (recommendation.price <= high)]
url = pd.read_json('outputs/attractions_cat.json',orient='records')
url['id'] = url.index
with_url = filtered.set_index('attraction_id').join(url[['id','attraction']].set_index('id'), how="inner")
print(with_url.head())
return with_url
def get_image(name):
    url = f'https://www.google.com/search?q={name}&hl=en-GB&source=lnms&tbm=isch&sa=X&ved=2ahUKEwi77e_zg_zzAhU64zgGHWyiCYgQ_AUoA3oECAEQBQ&biw=1920&bih=1007'
    res = requests.get(url)
    bs = BeautifulSoup(res.content, 'html5lib')
table = bs.find_all('img')
if len(table) >=6:
return table[5].get('src')
else:
return table[1].get('src')
# def get_image(name):
# name = name.split(",")[0]
# response = google_images_download.googleimagesdownload()
# args_list = ["keywords", "keywords_from_file", "prefix_keywords", "suffix_keywords",
# "limit", "format", "color", "color_type", "usage_rights", "size",
# "exact_size", "aspect_ratio", "type", "time", "time_range", "delay", "url", "single_image",
# "output_directory", "image_directory", "no_directory", "proxy", "similar_images", "specific_site",
# "print_urls", "print_size", "print_paths", "metadata", "extract_metadata", "socket_timeout",
# "thumbnail", "language", "prefix", "chromedriver", "related_images", "safe_search", "no_numbering",
# "offset", "no_download"]
# args = {}
# for i in args_list:
# args[i]= None
# args["keywords"] = name
# args['limit'] = 1
# params = response.build_url_parameters(args)
# url = 'https://www.google.com/search?q=' + quote(name) + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch' + params + '&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
# try:
# response.download(args)
# for filename in glob.glob("downloads/{name}/*jpg".format(name=name)) + glob.glob("downloads/{name}/*png".format(name=name)):
# return filename
# except:
# for filename in glob.glob("downloads/*jpg"):
# return filename
def top_recc(with_url, final):
i=0
print(with_url)
print(final)
try:
while(1):
first_recc = with_url.iloc[[i]]
if(first_recc['name'].values.T[0] not in final['name']):
final['name'].append(first_recc['name'].values.T[0])
final['location'].append(first_recc[['latitude','longitude']].values.tolist()[0])
final['price'].append(first_recc['price'].values.T[0])
final['rating'].append(first_recc['rating'].values.T[0])
final['image'].append(get_image(first_recc['name'].values.T[0]))
#final['image'].append('www.google.com/image')
final['category'].append(first_recc['category'].values.T[0])
return final
else:
i+=1
except Exception as e:
return final
def find_closest(with_url, loc, tod, final):
syns1 = wordnet.synsets("evening")
syns2 = wordnet.synsets("night")
evening = [word.lemmas()[0].name() for word in syns1] + [word.lemmas()[0].name() for word in syns2]
distance = list()
for i in with_url[['latitude','longitude']].values.tolist():
distance.append(math.sqrt((loc[0]-i[0])**2 + (loc[1]-i[1])**2))
with_dist = with_url
with_dist["distance"] = distance
sorted_d = with_dist.sort_values(['distance','price'], ascending=['True','False'])
if tod == "Evening":
mask = sorted_d.name.apply(lambda x: any(j in x for j in evening))
else:
mask = sorted_d.name.apply(lambda x: all(j not in x for j in evening))
final = top_recc(sorted_d[mask], final)
return final
def final_output(days, final):
time = ['MORNING', 'EVENING']
fields = ['NAME', 'CATEGORY', 'LOCATION', 'PRICE', 'RATING']
recommendations = ['Recommendation 1:', 'Recommendation 2:']
# box_layout = Layout(justify_content='space-between',
# display='flex',
# flex_flow='row',
# align_items='stretch',
# )
# column_layout = Layout(justify_content='space-between',
# width='75%',
# display='flex',
# flex_flow='column',
# )
tab = {}
tab['name']=[]
tab['image']=[]
tab['price']=[]
tab['rating']=[]
tab['category']=[]
tab['location']=[]
for i in range(days):
tab['image'].append(final['image'][i*4:(i+1)*4])
#images = [open(i, "rb").read() for i in images]
tab['name'].append([re.sub('_',' ',i).capitalize() for i in final['name'][i*4:(i+1)*4]])
tab['category'].append([re.sub('_',' ',i).capitalize() for i in final['category'][i*4:(i+1)*4]])
tab['location'].append(["("+str(i[0])+","+str(i[1])+")" for i in final['location'][i*4:(i+1)*4]])
tab['price'].append([str(i) for i in final['price'][i*4:(i+1)*4]])
tab['rating'].append([str(i) for i in final['rating'][i*4:(i+1)*4]])
#print('Final Recommendations are: ',price, rating,location,category,name)
return tab
|
[
"arppitmadankar@gmail.com"
] |
arppitmadankar@gmail.com
|
e0debeba34e3216434f8060f919e057b302b2c58
|
2b86c6b3579c72d5c9bacc3948adf9243d939481
|
/ArgParse.py
|
33a5e91ef27e26df2240b43fdc1e4bde6924831d
|
[
"MIT"
] |
permissive
|
DeathPoison/tinker-cnc
|
e5c7864b2ebf2147b6046d442fd6dc5770609b4d
|
91fd33332665e130f93b6951a19514fbc9787781
|
refs/heads/master
| 2021-01-20T12:42:48.170750
| 2013-08-27T09:45:26
| 2013-08-27T09:45:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
#!/usr/bin/env python
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', action='store', dest='simple_value',
help='Store a simple value')
parser.add_argument('-c', action='store_const', dest='constant_value',
const='value-to-store',
help='Store a constant value')
parser.add_argument('-t', action='store_true', default=False,
dest='boolean_switch',
help='Set a switch to true')
parser.add_argument('-f', action='store_false', default=False,
dest='boolean_switch',
help='Set a switch to false')
parser.add_argument('-a', action='append', dest='collection',
default=[],
help='Add repeated values to a list',
)
parser.add_argument('-A', action='append_const', dest='const_collection',
const='value-1-to-append',
default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection',
const='value-2-to-append',
help='Add different values to list')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
results = parser.parse_args()
print 'simple_value =', results.simple_value
print 'constant_value =', results.constant_value
print 'boolean_switch =', results.boolean_switch
print 'collection =', results.collection
print 'const_collection =', results.const_collection
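# Example invocation (illustrative):
#   $ python ArgParse.py -s value -t -a one -a two -A -B
#   simple_value = value
#   constant_value = None
#   boolean_switch = True
#   collection = ['one', 'two']
#   const_collection = ['value-1-to-append', 'value-2-to-append']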
|
[
"DeathPoison.DC@gmail.com"
] |
DeathPoison.DC@gmail.com
|
f49ce359b7ffb83fdcac73b84fab03ed2e15aa3d
|
f8e7f0241de0d17295b04ffe71d1359ef0aaa3ff
|
/digits_in_list.py
|
f639c73e6e5fc066961a30d55a68a5a822dac38e
|
[] |
no_license
|
kosskiev/University-of-Michigan
|
6332c20666d9fd4511f2588a9359355291c04cb5
|
fb37f107c7e254881edb1f31365a82946ca360fa
|
refs/heads/main
| 2023-02-19T02:33:44.630869
| 2021-01-19T20:00:34
| 2021-01-19T20:00:34
| 317,478,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
#The string module provides sequences of various types of Python characters. It has an attribute called digits that produces the string ‘0123456789’.
#Import the module and assign this string to the variable nums. Below, we have provided a list of characters called chars.
#Using nums and chars, produce a list called is_num that consists of tuples.
#The first element of each tuple should be the character from chars, and the second element should be a Boolean that reflects whether or not it is a Python digit.
#Note: num = string.digits is assigned below, but the check itself uses str.isdigit()
import string
chars = ['h', '1', 'C', 'i', '9', 'True', '3.1', '8', 'F', '4', 'j'] #some text
num = string.digits
is_num = ()
spisok1 = []
for i in range(len(chars)):
spisok = ()
if chars[i].isdigit():
spisok += (chars[i], True)
spisok1.append(spisok)
else:
spisok += (chars[i], False)
spisok1.append(spisok)
is_num = spisok1
print(is_num)
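# Equivalent idiomatic one-liner (illustrative, produces the same list):
#   is_num = [(c, c.isdigit()) for c in chars]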
|
[
"noreply@github.com"
] |
kosskiev.noreply@github.com
|
8562913d19df6e29366246a74cfb3818c2b42ba8
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pygame/pygameweb/pygameweb/config.py
|
93d8fc8e758f4623cd6c55d2070b53f047f96a2d
|
[
"BSD-2-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:3ead591f9a215ef79ce67657f0809e549584a72ef37757eb3272ca4fbba1ab78
size 2948
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
072f0c80d4043ff14c2008f0175536b215afaba2
|
e5e8092fbb4c9a5db269a190de6ed3463e81d550
|
/env/bin/django-admin
|
b43568894f124da0b0dcf0d018964da9127d39c3
|
[] |
no_license
|
j0konda/computer_wizard
|
73a4210e75adfeed7c1680af073093a9015348d4
|
e4c4a8352922db2aeca215707ab9eddb2784a437
|
refs/heads/main
| 2023-08-11T01:37:02.650715
| 2021-09-28T06:05:43
| 2021-09-28T06:05:43
| 410,764,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
#!/home/kaf_pas/Job/computer_wizard/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"chivanov1986@mail.ru"
] |
chivanov1986@mail.ru
|
|
74d144e10c81c97b82aeabaa53b4b2de19b18c40
|
2044416164c1f094975aa6505d05cfc1bd77bca1
|
/5.py
|
4cf812d0e21e28739ad9bb5ac730286833168ac5
|
[] |
no_license
|
Kalyan009/set2
|
3df0749c6aa3cb0d75eed1793ccfae70ca48daa5
|
ee02c8e8af3d448f16f337166c27cc14cf63bd2d
|
refs/heads/master
| 2020-06-14T10:18:46.956087
| 2019-07-04T06:13:13
| 2019-07-04T06:13:13
| 194,979,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
a=int(input())
for i in range(1,6):
print(i*a)
|
[
"noreply@github.com"
] |
Kalyan009.noreply@github.com
|
e3ddebc4c031416506a32d27e37d33bae40a39e5
|
6afd26ee9efe6277724844fe9a49652c7a54ab53
|
/ProbabilityDistribution.py
|
864ded5762218cab302d14fc408bebe42f6ff8b1
|
[
"MIT"
] |
permissive
|
jingr1/SelfDrivingCar
|
4d027249c1fb0ea8edcf5dd6ee6d7df440730edb
|
2e8fe793ff605387a3a20936af337e1447a8559a
|
refs/heads/master
| 2021-08-19T13:48:40.760731
| 2017-11-26T13:30:54
| 2017-11-26T13:30:54
| 106,690,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,485
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-10-15 15:15:37
# @Author : jingray (lionel_jing@163.com)
# @Link : http://www.jianshu.com/u/01fb0364467d
# @Version : $Id$
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import numpy as np
def plot_uniform(x_minimum, x_maximum, tick_interval):
x = range(x_minimum, x_maximum + 1)
# TODO: Using x_maximum and x_minimum, calculate the height of the
# rectangle that represents the uniform probability distribution
# Recall that the rectangle area should be 1 for a uniform continuous
# distribution
y = 1/(x_maximum - x_minimum)
plt.bar(x_minimum, y, bottom=0, width= (x_maximum - x_minimum), align='edge', alpha=0.5)
plt.xlabel('Degrees')
plt.ylabel('Probability Distribution')
plt.title('Uniform Probability Distribution \n for a Spinning Bottle')
plt.xticks(np.arange(min(x), max(x)+1, tick_interval))
plt.ylim(0, .3)
plt.show()
plot_uniform(5, 10, 1)
import matplotlib.pyplot as plt
import numpy as np
def bar_heights(intervals, probabilities, total_probability):
heights = []
# normalize probability intervals
total_relative_prob = sum(probabilities) # calculate the sum of a list very concise!!!
for i in range(0, len(probabilities)):
bar_area = probabilities[i]*total_probability/total_relative_prob
heights.append(bar_area/(intervals[i+1]-intervals[i]))
return heights
def plot_discrete(intervals, probabilities, total_probability):
heights = bar_heights(intervals, probabilities, total_probability)
freqs = np.array(heights)
    bins = np.array(intervals)
widths = bins[1:] - bins[:-1] #calculate the time interval widths, very good!!!
freqs = freqs.astype(np.float)
tick_interval = 1
plt.bar(bins[:-1], freqs, width=widths, align='edge', edgecolor='black', alpha=0.5)
plt.xlabel('Interval')
plt.ylabel('Probability Distribution')
plt.title('Probability Distribution')
plt.xticks(np.arange(min(bins), max(bins)+1, tick_interval))
plt.show()
hour_intervals = [0, 5, 10, 16, 21, 24]
probability_intervals = [1, 5, 3, 6, 1/2]
accident_probability = 0.05
plot_discrete(hour_intervals,probability_intervals,accident_probability)
# Robot World 1-D
import matplotlib.pyplot as plt
import numpy as np
def initialize_robot(grid_size):
#grid = [1/grid_size for i in range(0,grid_size)]
grid = [1/grid_size] * grid_size
return grid
def grid_probability(grid, position):
try:
return grid[position]
except:
return None
def output_map(grid):
x_labels = range(len(grid))
#x_data = np.array(x_labels)
#y_data = np.array(grid)
#plt.bar(x_data, y_data, width=0.7, edgecolor='black')
plt.bar(x_labels, height=grid, width=0.7, edgecolor='black')
plt.xlabel('Grid Space')
plt.ylabel('Probability')
plt.title('Probability of the robot being at each space on the grid')
plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))
plt.show()
def update_probabilities(grid, updates):
#for i in range(len(updates)):
# grid[updates[i][0]]=updates[i][1]
for update in updates:
grid[update[0]] = update[1]
return grid
print(update_probabilities([0.2, 0.2, 0.2, 0.2, 0.2], [[0, .25], [4, 0.15]]))
#2-D Self-Driving Car World
import matplotlib.pyplot as plt
from pandas import DataFrame
class SelfDrivingCar():
def __init__(self, rows, columns):
self.grid = []
        self.grid_size = [rows, columns]
self.num_elements = rows * columns
def initialize_grid(self):
probability = 1/self.num_elements
for i in range(self.grid_size[0]):
list = []
for j in range(self.grid_size[1]):
list.append(probability)
self.grid.append(list)
return self.grid
    def output_probability(self, grid_point):
        try:
            return self.grid[grid_point[0]][grid_point[1]]
        except IndexError:
            return None
def update_probability(self, update_list):
for update in update_list:
self.grid[update[0]][update[1]]=update[2]
return self.grid
    def visualize_probability(self):
        if not self.grid:
            self.grid = [[0], [0]]
        plt.imshow(self.grid, cmap='Greys', clim=(0, .1))
plt.title('Heat Map of Grid Probabilities')
plt.xlabel('grid x axis')
plt.ylabel('grid y axis')
plt.show()
car = SelfDrivingCar(5,4)
car.initialize_grid()
# should output 0.05
print(car.output_probability([2,3]))
# should output 0.05
print(car.output_probability([1,2]))
car.update_probability([[2,3,.2], [1,2,.1]])
# should output 0.2
print(car.output_probability([2,3]))
# should output 0.1
print(car.output_probability([1,2]))
# should output a heat map
car.visualize_probability()
# Central Limit Theorem
# The normal (Gaussian) distribution appears throughout self-driving car
# applications, especially in sensor measurements and in tracking objects
# that move around the vehicle.
# import libraries used in the notebook
%matplotlib inline
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# Set figure height and width
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 8
fig_size[1] = 6
plt.rcParams["figure.figsize"] = fig_size
x = np.linspace(-12, 12, 100)
plt.title('Normal distribution \n mean = 0 \n standard deviation = ' + str(3))
plt.xlabel('value')
plt.ylabel('distribution')
plt.plot(x, stats.norm.pdf(x, 0, 3))
###Probability Distributions###
x = np.linspace(-12, 12, 100)
plt.subplot(221)
plt.plot(x, stats.norm.pdf(x, 0, 3))
plt.title('Normal Distribution')
plt.subplot(222)
plt.plot(x,stats.uniform.pdf(x,-5,10))
plt.title('Uniform Distribution')
plt.subplot(223)
plt.plot(x[x > -1],stats.chi2.pdf(x[x>-1],3))
plt.title('Chi2 Distribution')
plt.subplot(224)
plt.plot(x[x > -1],stats.lognorm.pdf(x[x > -1],3))
plt.title('Lognormal Distribution')
plt.subplots_adjust(hspace=.5)
###different probability distributions still work with the central limit theorem
#
### Probability distributions
def random_uniform(low_value, high_value, num_samples):
return np.random.uniform(low_value, high_value, num_samples)
### Poisson Distribution
def poisson_distribution(expectation, num_samples):
return np.random.poisson(expectation, num_samples)
def binomial_distribution(result, probability, trials):
return np.random.binomial(result, probability, trials)
uniform = random_uniform(1, 5, 100000)
poisson = poisson_distribution(6.0, 10000)
binomial = binomial_distribution(1, 0.5, 10000)
### Shows Central Limit Theorem: takes samples from a distribution and calculates the mean of each sample.
#
# variables:
# distribution => array containing values from a population
# iterations => number of times to draw samples and calculate the mean of the sample
# num_samples => sample size
# num_bins => controls the number of bins in the histograms
#
# outputs:
# (1) summary statistics of the population and the means of the samples
# (2) histogram of the population and means of the samples
# (3) normalized histogram of the means and a line chart of the equivalent normal distribution with the same mean and standard deviation
# (4) probability plot of the original distribution and the means of the samples
#
###
def sample_means_calculator(distribution, iterations, num_samples, num_bins):
# take samples from the distribution and calculate the mean of each sample
sample_means = []
# iterate through picking samples and calculating the mean of each sample
for iteration in range(iterations):
samples = []
# iterate through for the sample size chosen and randomly pick samples
for sample in range(num_samples):
samples.append(distribution[np.random.randint(1,len(distribution))])
# calculate the mean of the sample
sample_means.append(np.mean(samples))
# Calculate summary statistics for the population and the sample means
population_mean = np.average(distribution)
population_median = np.median(distribution)
population_deviation = np.std(distribution)
sample_mean = np.mean(sample_means)
sample_median = np.median(sample_means)
sample_deviation = np.std(sample_means)
print('population mean ', population_mean, ' \n population median ', population_median, '\n population standard deviation ', population_deviation)
print('\n mean of sample means ', sample_mean, '\n median of sample means ', sample_median, '\n standard deviation of sample means ', sample_deviation)
# histogram of the population and histogram of sample means
fig = plt.figure(figsize=(8, 4))
ax1 = plt.subplot(121)
plt.hist(distribution, bins=num_bins)
plt.title('Histogram of the Population')
plt.xlabel('Value')
plt.ylabel('Count')
ax2 = plt.subplot(122, sharex=ax1, sharey=ax1)
plt.hist(sample_means, bins=num_bins)
plt.title('Histogram of the Sample Means')
plt.xlabel('Value')
plt.ylabel('Count')
plt.show()
# normalized histogram of the sample means and an equivalent normal distribution with same mean and standard deviation
fig = plt.figure(figsize=(8, 3))
    plt.hist(sample_means, bins=num_bins, density=True)
plt.title('Normalized Histogram of Sample Means and \n Equivalent Normal Distribution')
plt.xlabel('Value')
plt.ylabel('Count')
x = np.linspace(min(sample_means), max(sample_means), 1000)
    plt.plot(x, stats.norm.pdf(x, sample_mean, sample_deviation))
plt.show()
# probability plots showing how the sample mean distribution is more normal than the population mean
fig = plt.figure(figsize=(8, 3))
ax5 = plt.subplot(121)
    stats.probplot(distribution, plot=plt)
ax6 = plt.subplot(122)
stats.probplot(sample_means, plot=plt)
ax5.set_title('Probability Plot of the Population')
ax6.set_title('Probability Plot of the Sample Means')
plt.show()
### Take samples and calculate the sample means for central limit theorem
sample_means_calculator(uniform, 100000, 50, 100)
sample_means_calculator(random_uniform(1,10,100000), 10000, 50, 100)
sample_means_calculator(poisson_distribution(6.0,500000), 10000, 90, 100)
sample_means_calculator(binomial_distribution(1, 0.5, 10000), 10000, 200, 100)
|
[
"lionel_jing@163.com"
] |
lionel_jing@163.com
|
8e2be3cccebc89d74b94fdd3aa2c5155949ca1db
|
3697477f1a65ec3cfcf397cbe16fb4ac1296dc1d
|
/mymqttcloud/learn_mqtt/mqtt_pub.py
|
052f39f8348e0919e22153bf2aefb337b0e3f935
|
[] |
no_license
|
Boris17210/development
|
967f8ea5e40928c8acc192aea59bc5fd3f48718b
|
d9be580dddc010a4b491effc78c2b72c4d36d995
|
refs/heads/master
| 2021-12-15T12:30:52.851725
| 2017-08-14T23:45:53
| 2017-08-14T23:45:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import time
import paho.mqtt.client as mqtt
mqtt_user = "username"
mqtt_passwd = "password"
mqtt_host = "127.0.0.1"
mqtt_port = 1883
mqttc = mqtt.Client()
# Connect
try:
mqttc.username_pw_set(mqtt_user, mqtt_passwd)
mqttc.connect(mqtt_host, mqtt_port, 60)
mqttc.loop_start()
except Exception:
    print("Could not connect to MQTT")
else:
    print("Connected to MQTT")
# Loop
while 1:
mqttc.publish("test/hello","Hello World",2)
time.sleep(1)
# Close
mqttc.loop_stop()
mqttc.disconnect()
|
[
"glyn.hudson@openenergymonitor.org"
] |
glyn.hudson@openenergymonitor.org
|
8abc73b0065a47c4b5390fd5729373b3d6ece852
|
6900ab36a3e1ffd30a7d9f2f425bb6fea2b8fa16
|
/pythum/qublock.py
|
8a4ae4b98145592aa258dfb0b66d7ff8a1797574
|
[
"MIT"
] |
permissive
|
fernando7jr/pythum
|
c7f0034d9adef4394828118395f3200c4cdd3c05
|
068e3259378bffb3ce448ed3f6af62176dc530d6
|
refs/heads/master
| 2021-10-02T19:36:30.568344
| 2018-11-30T10:59:36
| 2018-11-30T10:59:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,188
|
py
|
from typing import Union, List
from pythum.qubit import Qubit
class Qublock:
"""An list-like block of n qubits"""
def __init__(self, value: Union[int, 'Qublock', List[Qubit]]):
_type = type(value)
if _type is int:
# Just instanciate the block with n qubits
n = value
value = [
Qubit()
for i in range(n)
]
elif issubclass(_type, Qublock):
# Copy each qubit
qublock = value
value = [Qubit.from_qubit(qubit) for qubit in qublock.__qubits]
elif _type is not list:
raise ValueError("Expected an int or list of Qubits")
self.__qubits = value
@classmethod
def from_notation(cls, value: str) -> 'Qublock':
        block = value.split(">")[:-1]  # drop the empty trailing token, as Qubyte.from_notation does
value = [
Qubit.from_notation("{0}>".format(q))
for q in block
]
return cls(value)
def __str__(self):
return "".join((
str(qbit)
for qbit in self.__qubits
))
def __repr__(self):
return str(self)
    def __contains__(self, value) -> bool:
        return value in self.__qubits
def __iter__(self):
return iter(self.__qubits)
def __len__(self):
return len(self.__qubits)
def __getitem__(self, pos: int):
return self.__qubits[pos]
def __setitem__(self, pos: int, qubit: Qubit):
self.__qubits[pos] = qubit
class Qubyte(Qublock):
def __init__(self, value: Union[Qublock, List[Qubit]]=None):
if value is None:
value = 8
if type(value) is int and value > 8:
value = 8
super().__init__(value)
@classmethod
def from_notation(cls, value: str) -> 'Qubyte':
block = value.split(">")[:-1]
value = [
Qubit.from_notation("{0}>".format(q))
for q in block
]
_len = len(value)
if _len < 8:
            value = [
                Qubit()
                for i in range(_len, 8)
            ] + value
elif _len > 8:
value = value[-8:]
return cls(value)
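
# A minimal usage sketch (not part of the original module): Qublock(3) builds
# three default qubits, mirroring how the constructor handles an int argument.
if __name__ == "__main__":
    block = Qublock(3)
    assert len(block) == 3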
|
[
"fernando.settijunior@gmail.com"
] |
fernando.settijunior@gmail.com
|
a23a23682ec9f9c9254fc8be44cfb6198ae11dc8
|
4bf4b0e98a9923cc6d5b27c643108871133fc729
|
/Mechinal Turk Consensus Algorithm/vp/migrations/0065_auto_20160510_2234.py
|
0a0b9b9a956c7c5dae5b0d4d0efe8e25e78d7172
|
[] |
no_license
|
JustinHinh/AppyHour
|
85e6e5b51262ee0c83f3314d60af927bd059e7ed
|
b684acb59ed93abc411e529bbf9841cde5ac22cd
|
refs/heads/main
| 2023-02-04T23:45:06.984688
| 2020-12-28T23:01:09
| 2020-12-28T23:01:09
| 144,486,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('vp', '0064_auto_20160508_1319'),
]
operations = [
migrations.RemoveField(
model_name='mturklocationinfostat',
name='usLocaleRequired',
),
migrations.AddField(
model_name='mturklocationinfostat',
name='localeRequired',
field=models.CharField(max_length=3, null=True),
),
]
|
[
"76710423+JustinHinh@users.noreply.github.com"
] |
76710423+JustinHinh@users.noreply.github.com
|
baf6d43bb76cf966f9aafce6ee12d8dd8e818f72
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q783_Minimum_Distance_Between_BST_Nodes.py
|
4e4fe499e62126c9b084fde6bd89e951b18accbf
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875
| 2023-03-26T12:22:42
| 2023-03-26T12:22:42
| 204,069,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
from typing import List, Optional
from collections import Counter,defaultdict
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count
import queue
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def minDiffInBST(self, root: "Optional[TreeNode]") -> int:
return self.min_difference(root)
def min_difference(self,node):
if node == None:
return float("inf")
node_left_min = float("inf")
if node.left!=None:
node_left_min = node.val-self.max_left(node.left)
node_right_min = float("inf")
if node.right!=None:
node_right_min = self.max_right(node.right)-node.val
left_min = self.min_difference(node.left)
right_min = self.min_difference(node.right)
return min(node_left_min,node_right_min,left_min,right_min)
def max_left(self,node):
if node.right == None:
return node.val
else:
return self.max_left(node.right)
def max_right(self,node):
if node.left == None:
return node.val
else:
return self.max_right(node.left)
sol = Solution()
# input
# [4,2,6,1,3]
# [1,0,48,null,null,12,49]
# [1,0]
# [2,0,5]
# [2,0,6]
# [5,0,13]
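
# `root` is normally supplied by the LeetCode judge; the lists above are raw
# test inputs. A minimal local stand-in (an assumption, not part of the
# original submission) rebuilds a BST from the first input:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def insert_bst(node, val):
    # plain BST insertion, used only to reconstruct a test tree
    if node is None:
        return TreeNode(val)
    if val < node.val:
        node.left = insert_bst(node.left, val)
    else:
        node.right = insert_bst(node.right, val)
    return node

root = None
for v in [4, 2, 6, 1, 3]:
    root = insert_bst(root, v)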
# output
output = sol.minDiffInBST(root)
# answer
answer = ""
print(output, answer, answer == output)
|
[
"444980834@qq.com"
] |
444980834@qq.com
|
18281579b1d71b97a1f3f2b924b6b950572c7b81
|
4ea643c2074ccd8826043e246df2e4a766936878
|
/server/configurer.py
|
f343e2bcb7c1ee633bdc7845c96478b65e813175
|
[] |
no_license
|
intrepiditee/IFTTT-Privacy
|
16bcc206ef3878b714567d643fbc45aecb1c4ee7
|
0c3db5df682368592957143142da09b9ad87e1ac
|
refs/heads/master
| 2020-08-21T23:46:03.069728
| 2019-12-11T07:31:27
| 2019-12-11T07:31:27
| 216,274,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
import json
import threading
class Configurer:
def __init__(self, path, key="sensors"):
with open(path, "rt") as f:
configs = json.load(f)
self.configs = configs[key]
self.index = 0
self.lock = threading.Lock()
    def get(self):
        with self.lock:
            config = self.configs[self.index]
            self.index += 1
            if self.index >= len(self.configs):
                self.index = 0
        return config
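
# A minimal usage sketch (the file name and contents are hypothetical, not
# part of the original module):
#
#     configurer = Configurer("sensors.json")  # {"sensors": [{...}, {...}]}
#     config = configurer.get()  # cycles through configs, thread-safely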
|
[
"jay.shijunjie@gmail.com"
] |
jay.shijunjie@gmail.com
|
d76e46afa9347a3212afc1f391dab391766e7696
|
a36501f44a09ca03dd1167e1d7965f782e159097
|
/app/extensions/mongobeat/models.py
|
27451e0eefe1a01350156a088481e408b9a33cd9
|
[
"Apache-2.0"
] |
permissive
|
ssfdust/full-stack-flask-smorest
|
9429a2cdcaa3ff3538875cc74cff802765678d4b
|
4f866b2264e224389c99bbbdb4521f4b0799b2a3
|
refs/heads/master
| 2023-08-05T08:48:03.474042
| 2023-05-07T01:08:20
| 2023-05-07T01:08:20
| 205,528,296
| 39
| 10
|
Apache-2.0
| 2023-08-31T00:18:42
| 2019-08-31T10:12:25
|
Python
|
UTF-8
|
Python
| false
| false
| 7,149
|
py
|
# Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 Regents of the University of Michigan
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
"""
app.extensions.mongobeat
~~~~~~~~~~~~~~~~~~~~~~~~~
The ORM module for MongoBeat
"""
import datetime
from ast import literal_eval
import celery.schedules
from celery import current_app
from mongoengine import (
BooleanField,
DateTimeField,
DictField,
DynamicDocument,
DynamicField,
EmbeddedDocument,
EmbeddedDocumentField,
IntField,
ListField,
StringField,
)
def get_periodic_task_collection():
"""获取表名"""
if (
hasattr(current_app.conf, "CELERY_MONGODB_SCHEDULER_COLLECTION")
and current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION
):
return current_app.conf.CELERY_MONGODB_SCHEDULER_COLLECTION # pragma: no cover
return "schedules"
#: Authorized values for PeriodicTask.Interval.period
PERIODS = ("days", "hours", "minutes", "seconds", "microseconds")
class PeriodicTask(DynamicDocument):
"""
    ORM for periodic tasks

    :attr name: schedule name
    :attr task: task name
    :attr interval: interval schedule
    :attr crontab: crontab schedule
    :attr args: positional arguments
    :attr kwargs: keyword arguments
    :attr queue: queue
    :attr no_changes: no-changes flag
    :attr exchange: AMQP exchange
    :attr routing_key: AMQP routing key
    :attr soft_time_limit: soft time limit
    :attr expires: expiry time
    :attr start_after: run only after this time
    :attr enabled: enabled
    :attr last_run_at: time of the last run
    :attr total_run_count: total number of runs
    :attr max_run_count: maximum number of runs
    :attr date_changed: date changed
    :attr description: description
    :attr run_immediately: run immediately
"""
meta = {"collection": get_periodic_task_collection(), "allow_inheritance": True}
class Interval(EmbeddedDocument):
"""
        :attr every: every (number of periods)
        :attr period: period unit
"""
meta = {"allow_inheritance": True}
every = IntField(min_value=0, default=0, required=True, verbose_name="周期")
period = StringField(choices=PERIODS, verbose_name="每")
@property
def schedule(self):
return celery.schedules.schedule(
datetime.timedelta(**{self.period: self.every})
)
@property
def period_singular(self):
return self.period[:-1]
def __str__(self):
if self.every == 1:
return "every {0.period_singular}".format(self)
return "every {0.every} {0.period}".format(self)
class Crontab(EmbeddedDocument):
"""
        :attr minute: minute
        :attr hour: hour
        :attr day_of_week: day of the week
        :attr day_of_month: day of the month
        :attr month_of_year: month of the year
"""
meta = {"allow_inheritance": True}
minute = StringField(default="*", required=True, verbose_name="分钟")
hour = StringField(default="*", required=True, verbose_name="小时")
day_of_week = StringField(default="*", required=True, verbose_name="周")
day_of_month = StringField(default="*", required=True, verbose_name="日")
month_of_year = StringField(default="*", required=True, verbose_name="月")
@property
def schedule(self):
return celery.schedules.crontab(
minute=self.minute,
hour=self.hour,
day_of_week=self.day_of_week,
day_of_month=self.day_of_month,
month_of_year=self.month_of_year,
)
def __str__(self):
def rfield(f):
return f and str(f).replace(" ", "") or "*"
return "{0} {1} {2} {3} {4} (分/时/周/日/月)".format(
rfield(self.minute),
rfield(self.hour),
rfield(self.day_of_week),
rfield(self.day_of_month),
rfield(self.month_of_year),
)
name = StringField(unique=True, verbose_name="定时名称")
task = StringField(required=True, verbose_name="任务名称")
args = ListField(DynamicField(), verbose_name="参数")
kwargs = DictField(verbose_name="键值参数")
queue = StringField(verbose_name="队列")
exchange = StringField(verbose_name="AMPQ的交换器")
routing_key = StringField(verbose_name="AMPQ路由")
soft_time_limit = IntField(verbose_name="软时间限制")
expires = DateTimeField(verbose_name="过期时间")
start_after = DateTimeField(verbose_name="在某时间后运行")
enabled = BooleanField(default=False, verbose_name="启用")
last_run_at = DateTimeField(verbose_name="最后运行时间")
total_run_count = IntField(min_value=0, default=0, verbose_name="总计运行次数")
max_run_count = IntField(min_value=0, default=0, verbose_name="最大运行次数")
date_changed = DateTimeField(verbose_name="改变日期")
description = StringField(verbose_name="描述")
run_immediately = BooleanField(verbose_name="立刻运行")
type = StringField(
required=True, verbose_name="类型", choices=["crontab", "interval"]
)
interval = EmbeddedDocumentField(Interval, verbose_name="定时")
crontab = EmbeddedDocumentField(Crontab, verbose_name="周期")
# objects = managers.PeriodicTaskManager()
no_changes = False
def clean(self):
"""透过MongoEngine验证interval和crontab不是同时存在"""
if self.type == "crontab":
self.interval = None
else:
self.crontab = None
if isinstance(self.args, str):
self.args = literal_eval(self.args)
if isinstance(self.kwargs, str):
self.kwargs = literal_eval(self.kwargs)
@property
def schedule(self):
if self.interval:
return self.interval.schedule
elif self.crontab:
return self.crontab.schedule
else:
raise Exception("must define interval or crontab schedule")
def __str__(self):
fmt = "{0.name}: {{no schedule}}"
if self.interval:
fmt = "{0.name}: {0.interval}"
elif self.crontab:
fmt = "{0.name}: {0.crontab}"
else:
raise Exception("must define interval or crontab schedule")
return fmt.format(self)
|
[
"ssfdust@gmail.com"
] |
ssfdust@gmail.com
|
5c2bac8afe40da95fd9e31452f0d7db092d11988
|
e11dd1fe3c2f5b079a77d81e35d06a0c010f0e0d
|
/character.py
|
b03eede15ab858e2251c4d6d036ddef40b7a25a4
|
[] |
no_license
|
hissboombear/C.B.using_Python
|
79c23d25cfbfb3600351787c670013beaaf1037d
|
4c075a0ea240edbab9e7be267544b9ad3fce26fa
|
refs/heads/master
| 2021-05-12T10:24:11.310886
| 2018-11-10T17:35:00
| 2018-11-10T17:35:00
| 117,351,616
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
print("Create your character")
name = input("What is your character called?")
age = input("How old is your character?")
strengths = input("What are your character's strengths?")
weaknesses = input("What are your character's weaknesses?")
print(name, "says, 'Thanks for creating me!'")
|
[
"hissboombear@gmail.com"
] |
hissboombear@gmail.com
|
2c4815d72b5155adfdf7058fe4a14ff7f245285f
|
6497bc5638453877744c900f7accef0203f36e89
|
/leedcode1_twosum.py
|
e4bfcfdfe9201a15782286e8a9d575f229c34ec0
|
[] |
no_license
|
budaLi/leetcode-python-
|
82e9affb3317f63a82d89d7e82650de3c804a5ac
|
4221172b46d286ab6bf4c74f4d015ee9ef3bda8d
|
refs/heads/master
| 2022-01-30T00:55:26.209864
| 2022-01-05T01:01:47
| 2022-01-05T01:01:47
| 148,323,318
| 46
| 23
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
#-*-coding:utf8-*-
#author : Lenovo
#date: 2018/7/23
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        d = {}  # maps each value in nums to its index
        for i in range(len(nums)):
            # check for the complement before inserting the current value,
            # so an element is never paired with itself
            if target - nums[i] in d:
                return [d[target - nums[i]], i]
            if nums[i] not in d:
                d[nums[i]] = i
ex=Solution()
e=ex.twoSum([1,2,5,7,8],16)
print(e)
|
[
"31475416+152056208@users.noreply.github.com"
] |
31475416+152056208@users.noreply.github.com
|
41ff407e001eb84f261949512c23c006b228372e
|
d92338d46e1184bc526ec15fa918c71a46e37f27
|
/GreettingAppProject/greettingApp/views.py
|
0350960d380f0579528323354b81bbf391f12890
|
[
"MIT"
] |
permissive
|
birajit95/Greeating_App_Using_Django
|
7952134afac8ea4730fe22035c60266a405797c0
|
affe165a86232fc04874795bb99e299a2a5aebe5
|
refs/heads/master
| 2023-02-03T03:39:50.528362
| 2020-12-22T14:31:39
| 2020-12-22T14:31:39
| 321,716,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
from django.shortcuts import render, HttpResponse
from .models import GreetingRecords
from .logger.logger import logger
import json
def home(request):
recordData = GreetingRecords.objects.all()
logger.info("All records are displayed")
return render(request, "greetingApp/home.html", {"data": recordData})
def addData(request):
if request.method == 'POST':
formData = json.loads(request.body.decode())
if formData:
recordData = GreetingRecords(name=formData["name"], message=formData['message'])
recordData.save()
logger.info(f"{recordData} Data is saved")
data = [dict(item) for item in GreetingRecords.objects.all().values('id', 'name', 'message')]
return HttpResponse(json.dumps(data))
else:
logger.error("Data saving failed")
return HttpResponse("false")
def deleteRecord(request, recordID):
if request.method == "DELETE":
record = GreetingRecords.objects.filter(id=recordID)
record.delete()
logger.info(f"{record} Record Deleted successfully")
data = [dict(item) for item in GreetingRecords.objects.all().values('id', 'name', 'message')]
return HttpResponse(json.dumps(data))
logger.error("Record Deletion failed")
return HttpResponse("false")
def updateRecord(request, recordID):
if request.method == "PUT":
formData = json.loads(request.body.decode())
if formData:
record = GreetingRecords.objects.get(id=recordID)
record.name = formData["name"]
record.message = formData["message"]
record.save()
logger.info(f"{record} Record updated successfully")
data = [dict(item) for item in GreetingRecords.objects.all().values('id', 'name', 'message')]
return HttpResponse(json.dumps(data))
else:
logger.error("Record Updating failed")
return HttpResponse("false")
|
[
"birajit95@gmail.com"
] |
birajit95@gmail.com
|
d31cbc5e81c667f85f43dbf60c55f2703673fc8c
|
5e66a11717a4760646c0e02bf9ffff2f82f66d18
|
/chemistry/qchem_make_opt_input_from_opt.py
|
ca09c35ad793d6cf3c29ac90a3ae9a121f288104
|
[] |
no_license
|
berquist/personal_scripts
|
4517678fa57e524e9765dc71f05594e34bdd9c72
|
d6c40ba6e5a607d26ffabf809cfdfdf3ce29bfb3
|
refs/heads/master
| 2023-07-21T08:44:36.401893
| 2023-07-07T19:55:55
| 2023-07-07T19:55:55
| 37,238,106
| 7
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,017
|
py
|
#!/usr/bin/env python
"""qchem_make_opt_input_from_opt.py: Make an input file for a Q-Chem
geometry optimization based on the last possible geometry from a
Q-Chem geometry optimization; this effectively 'restarts' the geometry
with a new filename.
The script assumes the output file being read from is called
'*opt(\d*).out', where 'opt' might be followed by a number. The script
will write an input file called '*opt(\d*)+1.in', with the previous
number incremented by one.
"""
import os.path
import re
from collections import OrderedDict
import cclib
from cclib.parser.utils import PeriodicTable
def make_file_iterator(filename):
"""Return an iterator over the contents of the given file name."""
# pylint: disable=C0103
with open(filename) as f:
contents = f.read()
return iter(contents.splitlines())
def getargs():
"""Get command-line arguments."""
import argparse
# pylint: disable=C0103
parser = argparse.ArgumentParser()
parser.add_argument("outputfilename", nargs="+")
parser.add_argument("--fragment", action="store_true")
args = parser.parse_args()
return args
def parse_user_input(outputfilename):
"""Parse the $rem section in the repeated 'User input:' section of the
output.
The reason we do it this way rather than with shell tools is to
handle any $section more easily and in a case-insensitive manner.
"""
user_input = dict()
outputfile = make_file_iterator(outputfilename)
line = ""
while "User input:" not in line:
line = next(outputfile)
line = next(outputfile)
assert "----" in line
line = next(outputfile)
while "--------------------------------------------------------------" not in line:
if line.strip() == "":
pass
elif line[0] == "$" and line.strip().lower() != "$end":
section_header = line[1:].lower()
user_input[section_header] = []
elif line.strip().lower() == "$end":
user_input[section_header] = "\n".join(user_input[section_header])
else:
user_input[section_header].append(line)
line = next(outputfile)
return user_input
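
# For orientation, an illustrative sketch of the returned structure (the
# values here are hypothetical, not output from a real file): lower-cased
# section names map to their raw bodies, e.g.
#
#     {"rem": "jobtype opt\nmethod b3lyp", "molecule": "0 1\nH 0.0 0.0 0.0"}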
def parse_fragments_from_molecule(molecule):
"""Given a $molecule section (without the $ lines), identify the
charges and multiplicities of each fragment and the zero-based indices
for the starting atom of each fragment.
"""
charges = []
multiplicities = []
start_indices = []
it = iter(molecule.splitlines())
line = next(it)
# sys_charge, sys_multiplicity = line.split()
counter = 0
# Gather the charges, spin multiplicities, and starting positions
# of each fragment.
for line in it:
if "--" in line:
line = next(it)
charge, multiplicity = line.split()
charges.append(charge)
multiplicities.append(multiplicity)
start_indices.append(counter)
else:
counter += 1
assert len(charges) == len(multiplicities) == len(start_indices)
return charges, multiplicities, start_indices
def form_molecule_section_from_fragments(
elements, geometry, charges, multiplicities, start_indices
):
"""Form the Q-Chem $molecule section containing the charge,
multiplicity, and atomic symbols and coordinates for multiple
fragments.
Returns a list that will need to be joined with newlines.
"""
assert len(charges) == len(multiplicities) == (len(start_indices) + 1)
s = "{:3s} {:15.10f} {:15.10f} {:15.10f}"
# The first elements of the charge and multiplicity lists are for
# the supersystem (whole molecule).
molecule_section = ["{} {}".format(charges[0], multiplicities[0])]
from itertools import count
for (charge, multiplicity, idx_iter) in zip(charges[1:], multiplicities[1:], count(0)):
molecule_section.append("--")
molecule_section.append("{} {}".format(charge, multiplicity))
idx_start = start_indices[idx_iter]
try:
idx_end = start_indices[idx_iter + 1]
except IndexError:
idx_end = len(elements)
for element, coords in zip(elements[idx_start:idx_end], geometry[idx_start:idx_end]):
molecule_section.append(s.format(element, *coords))
return molecule_section
def form_molecule_section(elements, geometry, charge, multiplicity):
"""Form the Q-Chem $molecule section containing the charge,
multiplicity, and atomic symbols and coordinates.
Returns a list that will need to be joined with newlines.
"""
s = "{:3s} {:15.10f} {:15.10f} {:15.10f}"
molecule_section = ["{} {}".format(charge, multiplicity)]
for (
element,
coords,
) in zip(elements, geometry):
molecule_section.append(s.format(element, *coords))
return molecule_section
if __name__ == "__main__":
args = getargs()
pt = PeriodicTable()
for outputfilename in args.outputfilename:
job = cclib.io.ccopen(outputfilename)
assert isinstance(job, cclib.parser.qchemparser.QChem)
try:
data = job.parse()
# this is to deal with the Q-Chem parser not handling
# incomplete SCF cycles properly
except StopIteration:
print("no output made: StopIteration in {}".format(outputfilename))
continue
# Determine the name of the file we're writing.
assert outputfilename.endswith(".out")
numstr = re.search(r"opt(\d*)", outputfilename).groups()[0]
if numstr == "":
optnum = 2
else:
optnum = int(numstr) + 1
inputfilename = re.sub(r"opt\d*", "opt{}".format(optnum), outputfilename)
inputfilename = inputfilename.replace(".out", ".in")
inputfilename = os.path.basename(inputfilename)
user_input = parse_user_input(outputfilename)
# Form the atomic symbols and coordinates for each atom in
# $molecule.
element_list = [pt.element[Z] for Z in data.atomnos]
last_geometry = data.atomcoords[-1]
if args.fragment:
charges, multiplicities, start_indices = parse_fragments_from_molecule(
user_input["molecule"]
)
charges.insert(0, data.charge)
multiplicities.insert(0, data.mult)
molecule_section = form_molecule_section_from_fragments(
element_list, last_geometry, charges, multiplicities, start_indices
)
else:
molecule_section = form_molecule_section(
element_list, last_geometry, data.charge, data.mult
)
user_input["molecule"] = "\n".join(molecule_section)
with open(inputfilename, "w") as fh:
for section_header in user_input:
fh.write("${}\n".format(section_header))
fh.write(user_input[section_header])
fh.write("\n$end\n\n")
print(inputfilename)
|
[
"eric.berquist@gmail.com"
] |
eric.berquist@gmail.com
|
9931346849ddccb0eb1f98dabaf8c0da6ac9234d
|
3ad5446f155d160e1f55cd59a364c415c2174b62
|
/primes.py
|
83cf5d0de71e067a9aefc8ac8f59c8bca80afdb7
|
[] |
no_license
|
nmanley73/pyprimes
|
bb87891913d881558b833c2632855927963f858d
|
04f263b683bc73134c2eca226b03f15cf79ca50a
|
refs/heads/master
| 2021-01-05T08:56:19.507653
| 2020-02-16T21:53:27
| 2020-02-16T21:53:27
| 240,965,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# Noel Manley
# Computing the primes
P = []
# Loop through all the numbers, checking each for primality
for i in range(2, 100):
# assume number is prime
isprime = True
# Loop through all values from 2 up to i
for j in range(2, i):
# see if j divides i
if i % j == 0:
# If it does, i isn't prime so exit the loop
isprime = False
break
    # If i is prime then append it to P
if isprime:
P.append(i)
# Print out the list
print(P)
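
# A possible refinement (a sketch, not part of the original script): trial
# division only needs to test divisors up to the square root of i, which
# shortens the inner loop considerably for larger ranges.
#
#     for j in range(2, int(i ** 0.5) + 1):
#         if i % j == 0:
#             isprime = False
#             break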
|
[
"noelmanley@hotmail.com"
] |
noelmanley@hotmail.com
|
f3287e42a48321132242a2d84b76e9deee52f5db
|
7834e7a48399b156401ea62c0c6d2de80ad421f5
|
/pysparkling/fileio/codec/codec.py
|
c057cfaa4b9cab5df56f5d5f9ac4badb66914438
|
[
"MIT"
] |
permissive
|
vojnovski/pysparkling
|
b9758942aba0d068f6c51797c8fb491cf59c3401
|
21b36464371f121dc7963dac09d300e7235f587e
|
refs/heads/master
| 2020-04-08T18:33:55.707209
| 2016-07-27T15:12:59
| 2016-07-27T15:12:59
| 62,555,929
| 0
| 0
| null | 2016-07-04T11:06:18
| 2016-07-04T11:06:18
| null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
import logging
log = logging.getLogger(__name__)
class Codec(object):
def __init__(self):
pass
def compress(self, stream):
return stream
def decompress(self, stream):
return stream
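
# A minimal sketch (not part of the original module) of how a concrete codec
# might subclass Codec; it assumes streams are file-like objects over bytes.
import gzip
from io import BytesIO


class GzipCodec(Codec):
    def compress(self, stream):
        # read the whole stream and hand back a compressed file-like object
        return BytesIO(gzip.compress(stream.read()))

    def decompress(self, stream):
        return BytesIO(gzip.decompress(stream.read()))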
|
[
"me@svenkreiss.com"
] |
me@svenkreiss.com
|
3a10c93c8ba77ad266183f0f7bc735de82fef001
|
ddaf6962ecda9977733d377fb06e89944d769aea
|
/controllers/ships.py
|
d7aa6d953e592a589fdab908c0519066e7596cd4
|
[] |
no_license
|
LaurenFWinter/Project-04
|
0d34561d7f22ad0a1e048645c70baebd22b190f2
|
3fe3eda20f542ac1d9230d59de8f68c3b10a44f6
|
refs/heads/master
| 2023-01-13T00:24:25.430905
| 2019-06-17T11:51:34
| 2019-06-17T11:51:34
| 188,671,513
| 0
| 0
| null | 2022-12-22T11:19:30
| 2019-05-26T10:56:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,766
|
py
|
from flask import Blueprint, request, jsonify, abort
from pony.orm import db_session
from app import db
from marshmallow import ValidationError
from models.Ship import Ship, ShipSchema
from lib.secure_route import secure_route
# create a router for this controller
router = Blueprint(__name__, 'ships')
# getting all of the ships
@router.route('/ships', methods=['GET'])
@db_session
def index():
schema = ShipSchema(many=True)
ships = Ship.select()
return schema.dumps(ships)
@router.route('/ships', methods=['POST'])
@db_session
@secure_route
def create():
schema = ShipSchema()
try:
data = schema.load(request.get_json())
ship = Ship(**data)
db.commit()
except ValidationError as err:
return jsonify({'message': 'Validation failed', 'errors': err.messages}), 422
return schema.dumps(ship), 201
@router.route('/ships/<int:ship_id>',
methods=['GET'])
@db_session
def show(ship_id):
schema = ShipSchema()
ship = Ship.get(id=ship_id)
if not ship:
abort(404)
return schema.dumps(ship)
@router.route('/ships/<int:ship_id>', methods=['PUT'])
@db_session
@secure_route
def update(ship_id):
schema = ShipSchema()
ship = Ship.get(id=ship_id)
if not ship:
abort(404)
try:
data = schema.load(request.get_json())
ship.set(**data)
db.commit()
except ValidationError as err:
return jsonify({'message': 'Validation failed', 'errors': err.messages}), 422
return schema.dumps(ship)
@router.route('/ships/<int:ship_id>', methods=['DELETE'])
@db_session
@secure_route
def delete(ship_id):
ship = Ship.get(id=ship_id)
if not ship:
abort(404)
ship.delete()
db.commit()
return '', 204
|
[
"lauren.fwinter@gmail.com"
] |
lauren.fwinter@gmail.com
|
5c7e35eb9122c4cf1073839e65428b6b4db9e74f
|
26c0de13a5bc7f1d2bb2cc1f854e492b95419232
|
/webapp/guest_book/migrations/0001_initial.py
|
077856f4959d2b19653db3b10adac167bd62c4ed
|
[] |
no_license
|
rodnoi7/python-group_3-exam_6-kaiypbek_sydykov
|
02bb2606b617cf2f1dbe230501e3fef87b4955a7
|
a4ac5b1bfe7e472bca59b2c603990c27351d2aaa
|
refs/heads/master
| 2023-05-01T01:35:42.125008
| 2019-09-21T13:04:38
| 2019-09-21T13:04:38
| 209,978,170
| 0
| 0
| null | 2023-04-21T20:37:32
| 2019-09-21T12:04:45
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
# Generated by Django 2.1 on 2019-09-21 06:04
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200, verbose_name='Автор')),
('author_email', models.CharField(max_length=200, verbose_name='Email автора')),
('title', models.CharField(max_length=200, verbose_name='Заголовок')),
('text', models.TextField(max_length=3000, verbose_name='Текст статьи')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Время создания')),
('status', models.CharField(choices=[('Active', 'Active'), ('Deactive', 'Deactive')], default='Active', max_length=50, verbose_name='Статус')),
],
),
]
|
[
"sydykov.99@gmail.com"
] |
sydykov.99@gmail.com
|
0ec2cebaf70d3553ef0ba7ab657a313fe4d7b7c4
|
bb38c64b43ecb51aa30031d05c8cde5a7cecae58
|
/aug_v5.py
|
3e8a46be2a507488d77a6c19b77356e491f0b7b6
|
[] |
no_license
|
ZiwenYeee/Santander-Customer-Transaction-Prediction
|
c2bd2b8085e73e3dad46234fddb27b52289e0ec5
|
439e647fb05e97e360f29cd39ee75b715556074b
|
refs/heads/master
| 2020-08-06T22:20:08.529636
| 2019-10-06T14:01:43
| 2019-10-06T14:01:43
| 213,178,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,477
|
py
|
import numpy as np
import pandas as pd
import gc
import time
import lightgbm as lgb
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
train = pd.read_csv('input/train.csv')
test = pd.read_csv('input/test.csv')
test_fake = pd.read_csv('synthetic_samples_indexes.csv')
test_fake.columns = ['ID_code']
test_fake['ID_code'] = test_fake.ID_code.apply(lambda x: 'test_' + str(x))
test_fake['dis'] = 1
test = pd.merge(test, test_fake, on = ['ID_code'], how = 'left')
test.dis.fillna(0, inplace=True)
test_real = test.loc[test.dis == 0]
test_fake = test.loc[test.dis == 1]
train['flag']=1
test_real['flag']=2
test_fake['flag']=3
data=pd.concat([train,test_real]).reset_index(drop=True)
print('data.shape=',data.shape)
del train,test_real
features = [col for col in data.columns if col not in ['ID_code', 'target', 'flag', 'dis']]
for var in features:
data['scaled_' + var]= (data[var]-data[var].mean())/data[var].std()*5
train=data[data['flag']==1].copy()
test_real=data[data['flag']==2].copy()
# test_fake=data[data['flag']==3].copy()
test=data[data['flag']>=2].copy()
test = pd.concat([test, test_fake], axis = 0)
print(train.shape,test_real.shape,test_fake.shape,test.shape)
del data
print(len(features))
def feature_eng(train, valid, test, origin_train, origin_test,feat):
for var in feat:
print(var)
data = pd.concat([origin_train[['ID_code', var]], origin_test[['ID_code', var]]])
data['weight_' + var] = data[var].map(data.groupby([var])[var].count())
train['weight_' + var] = train[var].map(data.groupby([var])[var].count())
valid['weight_' + var] = valid[var].map(data.groupby([var])[var].count())
test['weight_'+ var] = test[var].map(data.groupby([var])[var].count())
train['binary_' + var] = train['weight_' + var].apply(lambda x: 1 if x > 1 else 0) * train[var]
valid['binary_' + var] = valid['weight_' + var].apply(lambda x: 1 if x > 1 else 0) * valid[var]
test['binary_' + var] = test['weight_' + var].apply(lambda x: 1 if x > 1 else 0) * test[var]
return train, valid, test
def augment(x,y,t=2):
xs,xn = [],[]
for i in range(t):
mask = y>0
x1 = x[mask].copy()
ids = np.arange(x1.shape[0])
for c in range(x1.shape[1]):
np.random.shuffle(ids)
x1[:,c] = x1[ids][:,c]
xs.append(x1)
for i in range(t//2):
mask = y==0
x1 = x[mask].copy()
ids = np.arange(x1.shape[0])
for c in range(x1.shape[1]):
np.random.shuffle(ids)
x1[:,c] = x1[ids][:,c]
xn.append(x1)
xs = np.vstack(xs)
xn = np.vstack(xn)
ys = np.ones(xs.shape[0])
yn = np.zeros(xn.shape[0])
x = np.vstack([x,xs,xn])
y = np.concatenate([y,ys,yn])
return x,y
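
# Usage sketch (shapes are illustrative): given X of shape (n, d) and binary
# labels y, augment(X, y, t=2) stacks t column-shuffled copies of the positive
# rows and t//2 of the negative rows, so each feature keeps its within-class
# marginal distribution while the rows themselves are synthetic.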
def kfold_lightgbm(x_train, x_test, feature, feature_list, params, num_folds, test=True, stratified=False):
if stratified:
folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=2)
else:
folds = KFold(n_splits= num_folds, shuffle=True, random_state=2)
ntrain = x_train.shape[0]
ntest = x_test.shape[0]
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((num_folds, ntest))
feature_importance_df = pd.DataFrame()
for n_fold, (train_idx, test_idx) in enumerate(folds.split(x_train[feature],x_train['target'])):
print('\n############################# kfold = ' + str(n_fold + 1))
X_train, X_valid = x_train[feature].iloc[train_idx],x_train[feature].iloc[test_idx]
y_train, y_valid = x_train['target'].iloc[train_idx],x_train['target'].iloc[test_idx]
print('after kfold split, shape = ', X_train.shape)
N = 1
pred_valid, pred_test = 0,0
for Ni in range(N):
print('Ni = ', Ni)
X_t, y_t = augment(X_train.values, y_train.values)
X_t = pd.DataFrame(X_t, columns = feature)
print('after augmentation, shape = ', X_t.shape)
train_fe, valid_fe, test_fe = feature_eng(X_t, X_valid, x_test, x_train, test_real, feature)
print('after FE, shape = ', train_fe.shape)
if test:
train_fe = train_fe
valid_fe = valid_fe
test_fe = test_fe
else:
train_fe = train_fe[feature_list]
valid_fe = valid_fe[feature_list]
test_fe = test_fe[feature_list]
dtrain = lgb.Dataset(data = train_fe,
label = y_t,
free_raw_data = False, silent = True)
dtest = lgb.Dataset(data = X_valid,
label = y_valid,
free_raw_data = False, silent = True)
clf = lgb.train(
params=params,
train_set=dtrain,
num_boost_round=100000,
valid_sets=[dtrain, dtest],
early_stopping_rounds=400,
verbose_eval=4000
)
pred_valid += clf.predict(dtest.data)/N
pred_test += clf.predict(x_test[train_fe.columns])/N
oof_train[test_idx] = pred_valid
oof_test_skf[n_fold,:] = pred_test
print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(dtest.label, oof_train[test_idx])))
del clf, dtrain, dtest
gc.collect()
print("Full AUC score %.6f" % roc_auc_score(x_train['target'], oof_train))
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train, oof_test
params = {'metric': 'auc',
'learning_rate': 0.01,
'nthread': -1,
'max_depth':1,
'reg_lambda': 0.0,
'objective': 'binary',
# 'colsample_bytree': 1,
'bagging_freq': 5,
'feature_fraction':0.05,
'min_data_in_leaf':80,
'min_sum_hessian_in_leaf':10,
'boost_from_average':False,
'tree_learner':'serial',
'num_leaves': 13,
'boosting_type': 'gbdt'}
features = [col for col in train.columns if col not in ['target','ID_code','flag']]
oof_train, oof_test = kfold_lightgbm(train, test,
                                     features, feature_list=features,
                                     params=params, num_folds=5,
                                     test=True, stratified=False)
|
[
"noreply@github.com"
] |
ZiwenYeee.noreply@github.com
|
eae6b7945326af4a005546cc52f1a8376b7fd3f9
|
c66a71b46e4b4cf86115dcb845aecb6e3471bd73
|
/conway/conway.py
|
db106a36641d0f568078af8953f2e0caab1a5319
|
[
"MIT"
] |
permissive
|
tjshaffer21/Conway
|
8fe597f041679e1a9db69be648c84bcd4be99ce6
|
59663aed00ce5a2e516dec1f1b31fa3c3b7e92c4
|
refs/heads/master
| 2021-01-22T05:33:53.295148
| 2018-09-25T22:11:16
| 2018-09-25T22:11:16
| 81,675,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,067
|
py
|
# -*- coding: utf-8 -*-
"""conway.py: Implementation of Conway's Game of Life.
Attributes:
living_cell (pygame.Color): The initial color of a cell when it becomes alive.
dead_cell (pygame.Color): The color of a non-living cell.
"""
import sys, random
import pygame
living_cell = pygame.Color(150, 0, 0, 0)
dead_cell = pygame.Color(0, 0, 0, 0)
class State(object):
"""Class to hold the state of the environment.
Attributes:
conway (list): 2D list containing the current state of the conway en-
vironment.
living (int): The number of living cells.
Args:
width (int): The width for the conway data.
height (int): The height for the conway data.
"""
def __init__(self, width: int, height: int):
self._width = width
self._height = height
self._generations = 1
self.living = 0
self.conway = _seed(self._width, self._height)
for i in self.conway:
self.living += i.count(1)
@property
def width(self) -> int:
"""Return the width of the system.
Returns:
int
"""
return self._width
@property
def height(self) -> int:
"""Return the height of the system.
Returns:
int
"""
return self._height
@property
def generations(self) -> int:
"""Return the number of generations that have passed.
Returns:
int
"""
return self._generations
def inc_generation(self):
"""Increment the generation counter.
Post:
_generations is modified.
"""
self._generations += 1
def colorize(conway: list, color_grid: list) -> list:
"""Sets colors for the conway system.
Pre:
color_grid must be the list as defined in tiles.tilemap.
Post:
Arg color_grid is modified.
Args:
conway (list): conway list
color_grid (list): color list.
Returns:
list: color_grid is returned
"""
for y in range(0, len(conway)):
for x in range(0, len(conway[y])):
current = color_grid.get_current_chunk()[y][x]
if conway[y][x] == 0:
if current.color != dead_cell:
current.color = dead_cell
else:
if current.color == dead_cell:
current.color = living_cell
else:
color = current.color
if color.r < 255:
ncolor = pygame.Color(color.r+1, color.g, color.b, color.a)
current.color = ncolor
elif color.g < 255:
ncolor = pygame.Color(color.r, color.g+1, color.b, color.a)
current.color = ncolor
elif color.b < 255:
ncolor = pygame.Color(color.r, color.g, color.b+1, color.a)
current.color = ncolor
return color_grid
def increment(conway: list) -> int:
"""Increment conway by one.
Post
Arg conway is modified.
Args:
conway (list): conway list
Returns:
int: The number of living cells.
"""
def alive(arr: list, xy: tuple) -> bool:
"""Check if a cell is alive.
Alive is defined as currently living (1) or dying (-1); where dying
indicates a temporary indicator.
Args:
arr (list): conway list
xy (tuple): Position in arr defined in (x,y)
Returns:
boolean
"""
return True if arr[xy[1]][xy[0]] == -1 or arr[xy[1]][xy[0]] == 1 else False
def num_neighbors(arr: list, xy: tuple) -> int:
"""Return the number of living neighbors.
Args:
arr (list): conway list
xy (tuple): Position in arr using (x,y) values.
Returns:
int
"""
value = 0
for i in _moore_neighbors(arr, xy):
if alive(conway, i):
value += 1
return value
for y in range(0, len(conway)):
for x in range(0, len(conway[y])):
if alive(conway, (x, y)) and \
(num_neighbors(conway, (x, y)) <= 1 or num_neighbors(conway, (x, y)) > 3):
conway[y][x] = -1
elif not alive(conway, (x, y)) and num_neighbors(conway, (x, y)) == 3:
conway[y][x] = 2
# Check for number of living cells while flipping the cells to their proper
# states.
living = 0
for y in range(0, len(conway)):
for x in range(0, len(conway[y])):
if conway[y][x] == -1:
conway[y][x] = 0
elif conway[y][x] == 2:
conway[y][x] = 1
living += 1
elif conway[y][x] == 1:
living += 1
return living
def update(state: State, color_grid: list) -> tuple:
"""Update the conway state.
Pre:
color_grid must be the list as defined in tiles.tilemap.
Post:
state is modified
color_grid is modified.
Args:
state (conway.State): The conway state
Returns:
tuple (conway.State, list): State and color_grid are returned.
"""
state.living = increment(state.conway)
colorize(state.conway, color_grid)
state.inc_generation()
return (state, color_grid)
def _moore_neighbors(arr: list, xy: tuple) -> tuple:
"""Obtain a list of Moore's neighbours.
Pre:
arr must be a 2D list.
Args:
arr (list): 2d list.
xy (tuple): (x,y) values coresponding to the x,y values in arr.
Returns:
list: A list of tuples holding the neighbor's (x,y) values.
"""
width = len(arr[0])-1
height = len(arr)-1
neighbors = []
for x in range(xy[0]-1, xy[0]+2):
for y in range(xy[1]-1, xy[1]+2):
if (x >= 0 and y >= 0) and (x <= width and y <= height):
if not (xy[0] == x and xy[1] == y):
neighbors.append((x, y))
return neighbors
def _seed(width: int, height: int) -> list:
"""Create the initial environment.
Args:
width (int): The width of the environment.
height (int): The height of the environment.
Returns:
list
"""
seeds = [[random.random() for _ in range(width)] for _ in range(height)]
# For each cell, get the neighbors.
# If the neighbor's value is <= 0.5 then remove else
# if random value is < 0 remove.
for x in range(0, width):
for y in range(0, height):
for i in _moore_neighbors(seeds, (x,y)):
if seeds[i[1]][i[0]] < seeds[y][x]:
if seeds[i[1]][i[0]] <= 0.5:
seeds[i[1]][i[0]] = 0
elif random.random() < 0.5:
seeds[i[1]][i[0]] = 0
# Final environment should only be 0 or 1.
for y in range(0, height):
for x in range(0, width):
seeds[y][x] = round(seeds[y][x])
return seeds
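
# A minimal usage sketch (not part of the original module): advance one
# generation without the pygame tilemap by driving increment() directly.
#
#     state = State(20, 20)
#     state.living = increment(state.conway)
#     state.inc_generation()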
|
[
"tjshaffer21@gmail.com"
] |
tjshaffer21@gmail.com
|
b02d86a45d6c0be51b3c76f117ce1eadc89847e4
|
d4ea02450749cb8db5d8d557a4c2616308b06a45
|
/students/JonathanMauk/lesson02/series.py
|
2d74ea35237f9b07550c8ff0849f3dde42434744
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/Self_Paced-Online
|
75421a5bdd6233379443fc310da866ebfcd049fe
|
e298b1151dab639659d8dfa56f47bcb43dd3438f
|
refs/heads/master
| 2021-06-16T15:41:07.312247
| 2019-07-17T16:02:47
| 2019-07-17T16:02:47
| 115,212,391
| 13
| 160
| null | 2019-11-13T16:07:35
| 2017-12-23T17:52:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
def fibonacci(n):
"""Function that returns the nth term in the Fibonacci sequence, per F(n) = (n-1) + (n-2)."""
if n < 0:
print("Error: the first term in the Fibonacci sequence is 0. Please try again.")
else:
return sum_series(n)
def lucas(n):
"""Function that returns the nth term in the Lucas series, per F(n) = (n-1) + (n-2), and F(0) = 2 while F(1) = 1."""
if n < 0:
print("Error: the first term in the Lucas series is 2. Please try again.")
else:
return sum_series(n, 2, 1)
def sum_series(n, x=0, y=1):
"""
Generalized function that returns nth term in recursive sequences like Fibonacci and Lucas.
Defaults to Fibonacci sequence.
"""
if n == 0:
return x
if n == 1:
return y
else:
return sum_series(n-1, x, y) + sum_series(n-2, x, y)
# Assert statements: Fibonacci edition
assert fibonacci(1) == 1
assert fibonacci(5) == 5
assert fibonacci(15) == 610

# Assert statements: Lucas edition
assert lucas(0) == 2
assert lucas(3) == 4
assert lucas(8) == 47

# Assert statements: sum series edition
assert sum_series(4) == 3  # defaults to Fibonacci
assert sum_series(5, 2, 1) == 11  # Lucas series
assert sum_series(8, 7, 5) == 196  # arbitrary starting values
|
[
"jorauk@uw.edu"
] |
jorauk@uw.edu
|
9dbdfe36edaee2635775c291422d6c4fd26bfdeb
|
c1686616dddcbf9818029e76dcf5e93ccf3a6088
|
/bee_colony/utils.py
|
36f0e9b5e932b448d2cd5819e4e5aa3ed691c20a
|
[
"MIT"
] |
permissive
|
srom/abc
|
6cae2b8e436d966109705f00c72d3c91aac69338
|
41aa0fe9a381e7387d66cb0f8be7b8e454bb6a97
|
refs/heads/master
| 2020-08-23T09:46:06.260211
| 2019-10-30T16:06:08
| 2019-10-30T16:06:08
| 216,589,210
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
import numpy as np
def get_random_other_index(current_index, num_indices):
    # draw until we get an index different from the current one
    random_index = current_index
    while random_index == current_index:
        random_index = np.random.randint(0, num_indices)
    return random_index
def assign_probabilities(probabilities, ordered_indices):
if len(probabilities) != len(ordered_indices):
raise ValueError(f'Length mismatch: {len(probabilities)} != {len(ordered_indices)}')
sorted_probabilities = np.zeros((len(probabilities),))
for i, idx in enumerate(ordered_indices):
sorted_probabilities[idx] = probabilities[i]
return sorted_probabilities
def uniform_init(bounds):
"""
Uniform initialization function.
"""
if not isinstance(bounds, (np.ndarray, list)):
raise ValueError('Bounds must be a list or numpy array')
elif np.ndim(bounds) != 2:
ndim = np.ndim(bounds)
raise ValueError(f'Bounds must be a 2D array but got an array of dim {ndim}')
dimensions = len(bounds)
min_bounds = np.array([b[0] for b in bounds])
max_bounds = np.array([b[1] for b in bounds])
def init_fn(population_size):
return np.random.uniform(min_bounds, max_bounds, size=(population_size, dimensions))
return init_fn
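
# A minimal usage sketch (the bounds are hypothetical, not part of the
# original module):
#
#     init_fn = uniform_init([[0.0, 1.0], [-5.0, 5.0]])
#     population = init_fn(10)  # shape (10, 2), each column within its bounds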
|
[
"romain.strock@gmail.com"
] |
romain.strock@gmail.com
|
73a435e8064d91919dec34b8cd6bebc8580cccd6
|
47b4d76e9c87e6c45bab38e348ae12a60a60f94c
|
/Mutation_Modules/ASP_ABU.py
|
daf6d4fc6047cc403fb95ef273d03a28cd399101
|
[] |
no_license
|
PietroAronica/Parasol.py
|
9bc17fd8e177e432bbc5ce4e7ee2d721341b2707
|
238abcdc2caee7bbfea6cfcdda1ca705766db204
|
refs/heads/master
| 2021-01-10T23:57:40.225140
| 2020-10-14T02:21:15
| 2020-10-14T02:21:15
| 70,791,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,941
|
py
|
# ASP to ABU Mutation
import Frcmod_creator
import PDBHandler
import Leapy
from parmed.tools.actions import *
from parmed.amber.readparm import *
def parmed_command(vxi='VXI', lipid='No'):
bc = {}
with open('Param_files/AminoAcid/ASP.param', 'r') as b:
data = b.readlines()[1:]
for line in data:
key, value = line.split()
bc[key] = float(value)
b.close()
fc = {}
with open('Param_files/AminoAcid/ABU.param', 'r') as b:
data = b.readlines()[1:]
for line in data:
key, value = line.split()
fc[key] = float(value)
b.close()
for i in range(11):
a = i*10
i = float(i)
parm = AmberParm('Solv_{}_{}.prmtop'.format(a, 100-a))
changeLJPair(parm, ':{}@HB2'.format(vxi), ':{}@OD1'.format(vxi), '0', '0').execute()
changeLJPair(parm, ':{}@HB'.format(vxi), ':{}@HG1'.format(vxi), '0', '0').execute()
change(parm, 'charge', ':{}@N'.format(vxi), bc['N']+((fc['N']-bc['N'])/10)*i).execute()
change(parm, 'charge', ':{}@H'.format(vxi), bc['H']+((fc['H']-bc['H'])/10)*i).execute()
change(parm, 'charge', ':{}@CA'.format(vxi), bc['CA']+((fc['CA']-bc['CA'])/10)*i).execute()
change(parm, 'charge', ':{}@HA'.format(vxi), bc['HA']+((fc['HA']-bc['HA'])/10)*i).execute()
change(parm, 'charge', ':{}@CB'.format(vxi), bc['CB']+((fc['CB']-bc['CB'])/10)*i).execute()
change(parm, 'charge', ':{}@HB'.format(vxi), bc['HB2']-(bc['HB2']/10)*i).execute()
change(parm, 'charge', ':{}@HB2'.format(vxi), fc['HB2']/10*i).execute()
change(parm, 'charge', ':{}@HB3'.format(vxi), bc['HB3']+((fc['HB3']-bc['HB3'])/10)*i).execute()
change(parm, 'charge', ':{}@CG'.format(vxi), fc['CG']/10*i).execute()
change(parm, 'charge', ':{}@HG1'.format(vxi), fc['HG1']/10*i).execute()
change(parm, 'charge', ':{}@HG2'.format(vxi), fc['HG2']/10*i).execute()
change(parm, 'charge', ':{}@HG3'.format(vxi), fc['HG3']/10*i).execute()
change(parm, 'charge', ':{}@CG1'.format(vxi), (bc['CG']-(bc['CG']/10)*i)*(10-i)/10).execute()
change(parm, 'charge', ':{}@OD1'.format(vxi), (bc['OD1']-(bc['OD1']/10)*i)*(10-i)/10).execute()
change(parm, 'charge', ':{}@OD2'.format(vxi), (bc['OD2']-(bc['OD2']/10)*i)*(10-i)/10).execute()
change(parm, 'charge', ':{}@C'.format(vxi), bc['C']+((fc['C']-bc['C'])/10)*i).execute()
change(parm, 'charge', ':{}@O'.format(vxi), bc['O']+((fc['O']-bc['O'])/10)*i).execute()
#print printDetails(parm, ':VXI')
d = netCharge(parm).execute()
change(parm, 'charge', ':PC', '{:.3f}'.format(-d)).execute()
setOverwrite(parm).execute()
parmout(parm, 'Solv_{}_{}.prmtop'.format(a, 100-a)).execute()
def makevxi(struct, out, aa, vxi='VXI'):
struct.residue_dict[aa].set_resname(vxi)
CB = struct.residue_dict[aa].atom_dict['CB']
HB2 = struct.residue_dict[aa].atom_dict['HB2']
CG = struct.residue_dict[aa].atom_dict['CG']
pdb = open(out, 'w')
try:
pdb.write(struct.other_dict['Cryst1'].formatted())
except KeyError:
pass
for res in struct.residue_list:
for atom in res.atom_list:
if atom.get_name() == 'HB2' and res.get_resname() == vxi:
pdb.write(atom.change_name('HB'))
pdb.write(atom.superimposed1('HB2', CG))
elif atom.get_name() == 'HB3' and res.get_resname() == vxi:
pdb.write(atom.formatted())
pdb.write(atom.halfway_between('CG', CB, HB2))
pdb.write(atom.superimposed1('HG1', HB2))
pdb.write(atom.superimposed2('HG2', HB2))
pdb.write(atom.superimposed3('HG3', HB2))
elif atom.get_name() == 'CG' and res.get_resname() == vxi:
pdb.write(atom.change_name('CG1'))
else:
pdb.write(atom.formatted())
try:
pdb.write(struct.other_dict[atom.get_number()].ter())
except:
pass
for oth in struct.other_dict:
try:
if oth.startswith('Conect'):
pdb.write(struct.other_dict[oth].formatted())
except:
pass
pdb.write('END\n')
def variablemake(sym='^'):
var1 = sym + '1'
var2 = sym + '2'
var3 = sym + '3'
var4 = sym + '4'
var5 = sym + '5'
var6 = sym + '6'
var7 = sym + '7'
var8 = sym + '8'
var9 = sym + '9'
var10 = sym + '0'
var11 = sym + 'a'
var12 = sym + 'b'
var13 = sym + 'c'
var14 = sym + 'd'
var15 = sym + 'e'
return var1, var2, var3, var4, var5, var6, var7, var8, var9, var10, var11, var12, var13, var14, var15
def lib_make(ff, outputfile, vxi='VXI', var=variablemake()):
metcar = var[0]
methyd = var[1]
hydhyd1 = var[2]
carcar = var[3]
caroxy = var[4]
hydhyd2 = var[5]
ctrl = open('lyp.in', 'w')
ctrl.write("source %s\n"%ff)
ctrl.write("%s=loadpdb Param_files/LibPDB/ASP-ABU.pdb\n"%vxi)
ctrl.write('set %s.1.1 element "N"\n'%vxi)
ctrl.write('set %s.1.2 element "H"\n'%vxi)
ctrl.write('set %s.1.3 element "C"\n'%vxi)
ctrl.write('set %s.1.4 element "H"\n'%vxi)
ctrl.write('set %s.1.5 element "C"\n'%vxi)
ctrl.write('set %s.1.6 element "H"\n'%vxi)
ctrl.write('set %s.1.7 element "H"\n'%vxi)
ctrl.write('set %s.1.8 element "H"\n'%vxi)
ctrl.write('set %s.1.9 element "C"\n'%vxi)
ctrl.write('set %s.1.10 element "H"\n'%vxi)
ctrl.write('set %s.1.11 element "H"\n'%vxi)
ctrl.write('set %s.1.12 element "H"\n'%vxi)
ctrl.write('set %s.1.13 element "C"\n'%vxi)
ctrl.write('set %s.1.14 element "O"\n'%vxi)
ctrl.write('set %s.1.15 element "O"\n'%vxi)
ctrl.write('set %s.1.16 element "C"\n'%vxi)
ctrl.write('set %s.1.17 element "O"\n'%vxi)
ctrl.write('set %s.1.1 name "N"\n'%vxi)
ctrl.write('set %s.1.2 name "H"\n'%vxi)
ctrl.write('set %s.1.3 name "CA"\n'%vxi)
ctrl.write('set %s.1.4 name "HA"\n'%vxi)
ctrl.write('set %s.1.5 name "CB"\n'%vxi)
ctrl.write('set %s.1.6 name "HB"\n'%vxi)
ctrl.write('set %s.1.7 name "HB2"\n'%vxi)
ctrl.write('set %s.1.8 name "HB3"\n'%vxi)
ctrl.write('set %s.1.9 name "CG"\n'%vxi)
ctrl.write('set %s.1.10 name "HG1"\n'%vxi)
ctrl.write('set %s.1.11 name "HG2"\n'%vxi)
ctrl.write('set %s.1.12 name "HG3"\n'%vxi)
ctrl.write('set %s.1.13 name "CG1"\n'%vxi)
ctrl.write('set %s.1.14 name "OD1"\n'%vxi)
ctrl.write('set %s.1.15 name "OD2"\n'%vxi)
ctrl.write('set %s.1.16 name "C"\n'%vxi)
ctrl.write('set %s.1.17 name "O"\n'%vxi)
ctrl.write('set %s.1.1 type "N"\n'%vxi)
ctrl.write('set %s.1.2 type "H"\n'%vxi)
ctrl.write('set %s.1.3 type "CT"\n'%vxi)
ctrl.write('set %s.1.4 type "H1"\n'%vxi)
ctrl.write('set %s.1.5 type "CT"\n'%vxi)
ctrl.write('set %s.1.6 type "%s"\n'%(vxi, hydhyd1))
ctrl.write('set %s.1.7 type "%s"\n'%(vxi, hydhyd2))
ctrl.write('set %s.1.8 type "HC"\n'%vxi)
ctrl.write('set %s.1.9 type "%s"\n'%(vxi, metcar))
ctrl.write('set %s.1.10 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.11 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.12 type "%s"\n'%(vxi, methyd))
ctrl.write('set %s.1.13 type "%s"\n'%(vxi, carcar))
ctrl.write('set %s.1.14 type "%s"\n'%(vxi, caroxy))
ctrl.write('set %s.1.15 type "%s"\n'%(vxi, caroxy))
ctrl.write('set %s.1.16 type "C"\n'%vxi)
ctrl.write('set %s.1.17 type "O"\n'%vxi)
ctrl.write('bond %s.1.1 %s.1.2\n'%(vxi, vxi))
ctrl.write('bond %s.1.1 %s.1.3\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.4\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.5\n'%(vxi, vxi))
ctrl.write('bond %s.1.3 %s.1.16\n'%(vxi, vxi))
ctrl.write('bond %s.1.5 %s.1.6\n'%(vxi, vxi))
ctrl.write('bond %s.1.5 %s.1.7\n'%(vxi, vxi))
ctrl.write('bond %s.1.5 %s.1.8\n'%(vxi, vxi))
ctrl.write('bond %s.1.5 %s.1.9\n'%(vxi, vxi))
ctrl.write('bond %s.1.5 %s.1.13\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.10\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.11\n'%(vxi, vxi))
ctrl.write('bond %s.1.9 %s.1.12\n'%(vxi, vxi))
ctrl.write('bond %s.1.13 %s.1.14\n'%(vxi, vxi))
ctrl.write('bond %s.1.13 %s.1.15\n'%(vxi, vxi))
ctrl.write('bond %s.1.16 %s.1.17\n'%(vxi, vxi))
ctrl.write('set %s.1 connect0 %s.1.N\n'%(vxi, vxi))
ctrl.write('set %s.1 connect1 %s.1.C\n'%(vxi, vxi))
ctrl.write('set %s name "%s"\n'%(vxi, vxi))
ctrl.write('set %s.1 name "%s"\n'%(vxi, vxi))
ctrl.write('set %s head %s.1.N\n'%(vxi, vxi))
ctrl.write('set %s tail %s.1.C\n'%(vxi, vxi))
ctrl.write('saveoff %s %s.lib\n'%(vxi, vxi))
ctrl.write("quit\n")
ctrl.close()
Leapy.run('lyp.in', outputfile)
def all_make():
for i in range(0,110,10):
Frcmod_creator.make ('{}_{}.frcmod'.format(i, 100-i))
def cal(x, y, i):
    # Linear interpolation in ten steps: returns x at i=0 and y at i=10.
    num = x+((y-x)/10)*i
    return num
def lac(y, x, i):
    # Same formula with the endpoints swapped: the first argument is the
    # i=10 endpoint and the second argument is the i=0 endpoint.
    num = x+((y-x)/10)*i
    return num
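# For example, cal(0.0, 10.0, 3) == 3.0 while lac(0.0, 10.0, 3) == 7.0,
# since lac takes its endpoints in the opposite order.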
def stock_add_to_all(var=variablemake()):
metcar = var[0]
methyd = var[1]
hydhyd1 = var[2]
carcar = var[3]
caroxy = var[4]
hydhyd2 = var[5]
Frcmod_creator.make_hyb()
Frcmod_creator.TYPE_insert(carcar, 'C', 'sp2')
Frcmod_creator.TYPE_insert(caroxy, 'O', 'sp2')
Frcmod_creator.TYPE_insert(hydhyd2, 'H', 'sp3')
Frcmod_creator.TYPE_insert(metcar, 'C', 'sp3')
Frcmod_creator.TYPE_insert(methyd, 'H', 'sp3')
Frcmod_creator.TYPE_insert(hydhyd1, 'H', 'sp3')
    p = {}
    # Skip the header line, then map each parameter name to its list of float values.
    with open('Param_files/Stock/Stock.param', 'r') as b:
        data = b.readlines()[1:]
    for line in data:
        tokens = line.split()
        p[tokens[0]] = [float(point) for point in tokens[1:]]
for i in range(11):
a = i*10
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), carcar, cal(p['C'][0], p['0_C'][0], i), cal(p['C'][1], p['0_C'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), caroxy, cal(p['O2'][0], p['0_O'][0], i), cal(p['O2'][1], p['0_O'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd2, cal(p['0_H'][0], p['HC'][0], i), cal(p['0_H'][1], p['HC'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', carcar), cal(p['CT_C'][0], p['CT_mH'][0], i), cal(p['CT_C'][1], p['CT_mH'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', hydhyd2), cal(p['HC_sC2'][0], p['CT_HC'][0], i), cal(p['HC_sC2'][1], p['CT_HC'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format(carcar, caroxy), cal(p['C_O2'][0], p['O2_mH'][0], i), cal(p['C_O2'][1], p['O2_mH'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', carcar, caroxy), cal(p['C_C_O2'][0], p['Dritt'][0], i), cal(p['C_C_O2'][1], p['Dritt'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(caroxy, carcar, caroxy), cal(p['O2_C_O2'][0], p['Close'][0], i), cal(p['O2_C_O2'][1], p['Close'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', carcar), cal(p['CT_CT_C'][0], p['C_C_H'][0], i), cal(p['CT_CT_C'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd2, 'CT', carcar), cal(p['Close'][0], p['Close'][0], i), cal(p['Close'][1], p['Close'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', hydhyd2), cal(p['C_C_H'][0], p['C_C_H'][0], i), cal(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('HC', 'CT', carcar), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('HC', 'CT', hydhyd2), lac(p['H_C_H'][0], p['H_C_H'][0], i), lac(p['H_C_H'][1], p['H_C_H'][1], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('CT', 'CT', carcar, caroxy), cal(p['Ring_Dihe_2'][0], p['Ring_Dihe_2'][0], i), cal(p['Ring_Dihe_2'][1], p['Ring_Dihe_2'][1], i), cal(p['Ring_Dihe_2'][2], p['Ring_Dihe_2'][2], i), cal(p['Ring_Dihe_2'][3], p['Ring_Dihe_2'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('HC', 'CT', carcar, caroxy), lac(p['Ring_Dihe_2'][0], p['Ring_Dihe_2'][0], i), lac(p['Ring_Dihe_2'][1], p['Ring_Dihe_2'][1], i), lac(p['Ring_Dihe_2'][2], p['Ring_Dihe_2'][2], i), lac(p['Ring_Dihe_2'][3], p['Ring_Dihe_2'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd2, 'CT', carcar, caroxy), cal(p['0_Dihe'][0], p['0_Dihe'][0], i), cal(p['0_Dihe'][1], p['0_Dihe'][1], i), cal(p['0_Dihe'][2], p['0_Dihe'][2], i), cal(p['0_Dihe'][3], p['0_Dihe'][3], i))
Frcmod_creator.IMPROPER_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('X ', caroxy, carcar, caroxy), cal(p['Car_imp'][0], p['Imp_0'][0], i), cal(p['Car_imp'][1], p['Imp_0'][1], i), cal(p['Car_imp'][2], p['Imp_0'][2], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), carcar, cal(p['C'][2], p['0_C'][2], i), cal(p['C'][3], p['0_C'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), caroxy, cal(p['O2'][2], p['0_O'][2], i), cal(p['O2'][3], p['0_O'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd2, cal(p['0_H'][2], p['HC'][2], i), cal(p['0_H'][3], p['HC'][3], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd1, 'CT', carcar), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(metcar, 'CT', carcar), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd1, 'CT', hydhyd2), lac(p['H_C_H'][0], p['H_C_H'][0], i), lac(p['H_C_H'][1], p['H_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd2, 'CT', metcar), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd1, 'CT', carcar, caroxy), lac(p['Ring_Dihe_2'][0], p['Ring_Dihe_2'][0], i), lac(p['Ring_Dihe_2'][1], p['Ring_Dihe_2'][1], i), lac(p['Ring_Dihe_2'][2], p['Ring_Dihe_2'][2], i), lac(p['Ring_Dihe_2'][3], p['Ring_Dihe_2'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(metcar, 'CT', carcar, caroxy), lac(p['0_Dihe'][0], p['0_Dihe'][0], i), lac(p['0_Dihe'][1], p['0_Dihe'][1], i), lac(p['0_Dihe'][2], p['0_Dihe'][2], i), lac(p['0_Dihe'][3], p['0_Dihe'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(methyd, metcar, 'CT', carcar), lac(p['0_Dihe'][0], p['0_Dihe'][0], i), lac(p['0_Dihe'][1], p['0_Dihe'][1], i), lac(p['0_Dihe'][2], p['0_Dihe'][2], i), lac(p['0_Dihe'][3], p['0_Dihe'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd2, 'CT', metcar, methyd), lac(p['H_C_C_H'][0], p['0_1'][0], i), lac(p['H_C_C_H'][1], p['0_1'][1], i), lac(p['H_C_C_H'][2], p['0_1'][2], i), lac(p['H_C_C_H'][3], p['0_1'][3], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), metcar, lac(p['CT'][0], p['0_C'][0], i), lac(p['CT'][1], p['0_C'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), methyd, lac(p['HC'][0], p['0_H'][0], i), lac(p['HC'][1], p['0_H'][1], i))
Frcmod_creator.MASS_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd1, lac(p['0_H'][0], p['HC'][0], i), lac(p['0_H'][1], p['HC'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', metcar), lac(p['CT_CT'][0], p['CT_mH'][0], i), lac(p['CT_CT'][1], p['CT_mH'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format('CT', hydhyd1), lac(p['HC_sC'][0], p['CT_HC'][0], i), lac(p['HC_sC'][1], p['CT_HC'][1], i))
Frcmod_creator.BOND_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}'.format(metcar, methyd), lac(p['CT_HC'][0], p['HC_mH'][0], i), lac(p['CT_HC'][1], p['HC_mH'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(hydhyd1, 'CT', metcar), lac(p['Close'][0], p['Close'][0], i), lac(p['Close'][1], p['Close'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', metcar, methyd), lac(p['C_C_H'][0], p['Dritt'][0], i), lac(p['C_C_H'][1], p['Dritt'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format(methyd, metcar, methyd), lac(p['H_C_H'][0], p['Close'][0], i), lac(p['H_C_H'][1], p['Close'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', metcar), lac(p['C_C_C'][0], p['C_C_C'][0], i), lac(p['C_C_C'][1], p['C_C_C'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('HC', 'CT', metcar), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('CT', 'CT', hydhyd1), lac(p['C_C_H'][0], p['C_C_H'][0], i), lac(p['C_C_H'][1], p['C_C_H'][1], i))
Frcmod_creator.ANGLE_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}'.format('HC', 'CT', hydhyd1), lac(p['H_C_H'][0], p['H_C_H'][0], i), lac(p['H_C_H'][1], p['H_C_H'][1], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('CT', 'CT', metcar, methyd), lac(p['C_C_C_H'][0], p['0_1'][0], i), lac(p['C_C_C_H'][1], p['0_1'][1], i), lac(p['C_C_C_H'][2], p['0_1'][2], i), lac(p['C_C_C_H'][3], p['0_1'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format('HC', 'CT', metcar, methyd), lac(p['H_C_C_H'][0], p['0_1'][0], i), lac(p['H_C_C_H'][1], p['0_1'][1], i), lac(p['H_C_C_H'][2], p['0_1'][2], i), lac(p['H_C_C_H'][3], p['0_1'][3], i))
Frcmod_creator.DIHEDRAL_insert('{}_{}.frcmod'.format(a, 100-a), '{}-{}-{}-{}'.format(hydhyd1, 'CT', metcar, methyd), lac(p['0_Dihe'][0], p['0_Dihe'][0], i), lac(p['0_Dihe'][1], p['0_Dihe'][1], i), lac(p['0_Dihe'][2], p['0_Dihe'][2], i), lac(p['0_Dihe'][3], p['0_Dihe'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), metcar, lac(p['CT'][2], p['0_C'][2], i), lac(p['CT'][3], p['0_C'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), methyd, lac(p['HC'][2], p['0_H'][2], i), lac(p['HC'][3], p['0_H'][3], i))
Frcmod_creator.NONBON_insert('{}_{}.frcmod'.format(a, 100-a), hydhyd1, lac(p['0_H'][2], p['HC'][2], i), lac(p['0_H'][3], p['HC'][3], i))
|
[
"pietro.ga.aronica@gmail.com"
] |
pietro.ga.aronica@gmail.com
|
523d16ef57141f9cb5cf5a6b82ff1956faeb6860
|
600cfc373bb90cbcaec295de2583ed6e9b722b74
|
/CHARACTER_/character_web_fin/mbti/ai_mbti_analysis.py
|
2e8c0e483f5bed987732ecf4876c9204d18c4d59
|
[] |
no_license
|
kwangilkimkenny/Story_Analysis
|
e92e5c38a7fb34948d801d3170e8709f4670e74b
|
ea7f2ed33de735918f4f3f568f7b551fbcd95a33
|
refs/heads/master
| 2022-09-02T19:25:36.283950
| 2022-08-24T00:10:03
| 2022-08-24T00:10:03
| 239,153,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,867
|
py
|
#2020-10-11
#xgboost
#Refactored into functions for Django integration - but still being tested...
import pandas as pd
import numpy as np
import re
import pickle
# plotting
import seaborn as sns
import matplotlib.pyplot as plt
# Tune learning_rate
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
# First XGBoost model for MBTI dataset
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
##### Compute list of subject with Type | list of comments
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk import word_tokenize
import nltk
nltk.download('wordnet')
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.manifold import TSNE
# Convert an MBTI type string into four binary indicators
def get_types(row):
t=row['type']
I = 0; N = 0
T = 0; J = 0
if t[0] == 'I': I = 1
elif t[0] == 'E': I = 0
else: print('I-E incorrect')
if t[1] == 'N': N = 1
elif t[1] == 'S': N = 0
else: print('N-S incorrect')
if t[2] == 'T': T = 1
elif t[2] == 'F': T = 0
else: print('T-F incorrect')
if t[3] == 'J': J = 1
elif t[3] == 'P': J = 0
else: print('J-P incorrect')
return pd.Series( {'IE':I, 'NS':N , 'TF': T, 'JP': J })
# Letter-to-binary lookup table
b_Pers = {'I':0, 'E':1, 'N':0, 'S':1, 'F':0, 'T':1, 'J':0, 'P':1}
# Per-axis dictionaries for mapping binary values back to letters
b_Pers_list = [{0:'I', 1:'E'}, {0:'N', 1:'S'}, {0:'F', 1:'T'}, {0:'J', 1:'P'}]
def translate_personality(personality):
# transform mbti to binary vector
return [b_Pers[l] for l in personality]
def translate_back(personality):
# transform binary vector to mbti personality
s = ""
for i, l in enumerate(personality):
s += b_Pers_list[i][l]
return s
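# Round-trip example: translate_personality('INFJ') gives [0, 0, 0, 0],
# and translate_back([0, 0, 0, 0]) returns 'INFJ'.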
# We want to remove these from the posts
unique_type_list = ['INFJ', 'ENTP', 'INTP', 'INTJ', 'ENTJ', 'ENFJ', 'INFP', 'ENFP',
'ISFP', 'ISTP', 'ISFJ', 'ISTJ', 'ESTP', 'ESFP', 'ESTJ', 'ESFJ']
unique_type_list = [x.lower() for x in unique_type_list]
# Lemmatize
stemmer = PorterStemmer()
lemmatiser = WordNetLemmatizer()
# Cache the stop words for speed
cachedStopWords = stopwords.words("english")
def pre_process_data(data, remove_stop_words=True, remove_mbti_profiles=True):
list_personality = []
list_posts = []
len_data = len(data)
i=0
for row in data.iterrows():
i+=1
if (i % 500 == 0 or i == 1 or i == len_data):
print("%s of %s rows" % (i, len_data))
##### Remove and clean comments
posts = row[1].posts
temp = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', posts)
temp = re.sub("[^a-zA-Z]", " ", temp)
temp = re.sub(' +', ' ', temp).lower()
if remove_stop_words:
temp = " ".join([lemmatiser.lemmatize(w) for w in temp.split(' ') if w not in cachedStopWords])
else:
temp = " ".join([lemmatiser.lemmatize(w) for w in temp.split(' ')])
if remove_mbti_profiles:
for t in unique_type_list:
temp = temp.replace(t,"")
type_labelized = translate_personality(row[1].type)
list_personality.append(type_labelized)
list_posts.append(temp)
list_posts = np.array(list_posts)
list_personality = np.array(list_personality)
return list_posts, list_personality
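# Assumed usage sketch (not in the original): on a one-row frame the function
# returns the cleaned, lemmatized text plus the binary personality vector, e.g.
#   df = pd.DataFrame({'type': ['INFJ'], 'posts': ['Hello INFJ world http://x.y']})
#   posts, labels = pre_process_data(df)  # posts[0] ~ 'hello world', labels[0] -> [0, 0, 0, 0]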
# read data
# data = pd.read_csv('/Users/jongphilkim/Desktop/Django_WEB/essayfitaiproject_2020_12_09/essayai/mbti_1.csv')
data = pd.read_csv('./essayai/data/mbti_1.csv')
# Apply the get_types function to add the four binary columns
data = data.join(data.apply (lambda row: get_types (row),axis=1))
# load
with open('./essayai/ai_character/mbti/list_posts.pickle', 'rb') as f:
list_posts = pickle.load(f)
# load
with open('./essayai/ai_character/mbti/list_personality.pickle', 'rb') as f:
list_personality = pickle.load(f)
# # Posts to a matrix of token counts
cntizer = CountVectorizer(analyzer="word",
max_features=1500,
tokenizer=None,
preprocessor=None,
stop_words=None,
max_df=0.7,
min_df=0.1)
# Learn the vocabulary dictionary and return term-document matrix
print("CountVectorizer...")
X_cnt = cntizer.fit_transform(list_posts)
#################################################
#save!!! model X_cnt
import pickle
# save
# with open('./essayai/ai_character/mbti/data_X_cnt.pickle', 'wb') as f:
# pickle.dump(X_cnt, f, pickle.HIGHEST_PROTOCOL)
# load
with open('./essayai/ai_character/mbti/data_X_cnt.pickle', 'rb') as f:
X_cnt = pickle.load(f)
#################################################
# Transform the count matrix to a normalized tf or tf-idf representation
tfizer = TfidfTransformer()
print("Tf-idf...")
# Learn the idf vector (fit) and transform a count matrix to a tf-idf representation
X_tfidf = tfizer.fit_transform(X_cnt).toarray()
# load
with open('./essayai/ai_character/mbti/data.pickle', 'rb') as f:
X_tfidf = pickle.load(f)
def mbti_classify(text):
type_indicators = [ "IE: Introversion (I) / Extroversion (E)", "NS: Intuition (N) – Sensing (S)",
"FT: Feeling (F) - Thinking (T)", "JP: Judging (J) – Perceiving (P)" ]
# Posts in tf-idf representation
X = X_tfidf
my_posts = str(text)
    # The type is just a dummy so that the data prep function can be reused
mydata = pd.DataFrame(data={'type': ['INFJ'], 'posts': [my_posts]})
my_posts, dummy = pre_process_data(mydata, remove_stop_words=True)
my_X_cnt = cntizer.transform(my_posts)
my_X_tfidf = tfizer.transform(my_X_cnt).toarray()
# setup parameters for xgboost
param = {}
param['n_estimators'] = 200
param['max_depth'] = 2
param['nthread'] = 8
param['learning_rate'] = 0.2
result = []
# Let's train type indicator individually
for l in range(len(type_indicators)):
print("%s ..." % (type_indicators[l]))
Y = list_personality[:,l]
# split data into train and test sets
seed = 7
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# fit model on training data
model = XGBClassifier(**param)
model.fit(X_train, y_train)
# make predictions for my data
y_pred = model.predict(my_X_tfidf)
result.append(y_pred[0])
# print("* %s prediction: %s" % (type_indicators[l], y_pred))
print("The result is: ", translate_back(result))
    # Collect the result letters into a list
Result_list = list(translate_back(result))
    # Print the explanation matching each MBTI result letter
# read data
# data = pd.read_csv('/Users/jongphilkim/Desktop/Django_WEB/essayfitaiproject/essayai/mbti_exp.csv')
data = pd.read_csv('./essayai/data/mbti_exp.csv')
    # Build a new DataFrame to hold the computed values
df2 = pd.DataFrame(index=range(0,4),columns=['Type', 'Explain'])
    # Look up each result letter and fill in the corresponding DataFrame row
for i in range(0, len(Result_list)):
type = Result_list[i]
for j in range(0, len(data)):
if type == data.iloc[j,0]:
break
is_mbti = data.iloc[j,2]
df2.iloc[i, [0,1]] = [type, is_mbti]
print(df2)
return df2
# my_posts = """Describe a place or environment where you are perfectly content. What do you do or experience there, and why is it meaningful to you? 644 words out of 650 Gettysburg, a small town in the middle of Pennsylvania, was the sight of the largest, bloodiest battle in the Civil War. Something about these hallowed grounds draws me back every year for a three day camping trip with my family over Labor Day weekend. Every year, once school starts, I count the days until I take that three and half hour drive from Pittsburgh to Gettysburg. Each year, we leave after school ends on Friday and arrive in Gettysburg with just enough daylight to pitch the tents and cook up a quick dinner on the campfire. As more of the extended family arrives, we circle around the campfire and find out what is new with everyone. The following morning, everyone is up by nine and helping to make breakfast which is our best meal of the day while camping. Breakfast will fuel us for the day as we hike the vast battlefields. My Uncle Mark, my twin brother, Andrew, and I like to take charge of the family tour since we have the most passion and knowledge about the battle. I have learned so much from the stories Mark tells us while walking on the tours. Through my own research during these last couple of trips, I did some of the explaining about the events that occurred during the battle 150 years ago. My fondest experience during one trip was when we decided to go off of the main path to find a carving in a rock from a soldier during the battle. Mark had read about the carving in one of his books about Gettysburg, and we were determined to locate it. After almost an hour of scanning rocks in the area, we finally found it with just enough daylight to read what it said. After a long day of exploring the battlefield, we went back to the campsite for some 'civil war' stew. There is nothing special about the stew, just meat, vegetables and gravy, but for whatever reason, it is some of the best stew I have ever eaten. For the rest of the night, we enjoy the company of our extended family. My cousins, my brother and I listen to the stories from Mark and his friends experiences' in the military. After the parents have gone to bed, we stay up talking with each other, inching closer and closer to the fire as it gets colder. Finally, we creep back into our tents, trying to be as quiet as possible to not wake our parents. The next morning we awake red-eyed from the lack of sleep and cook up another fantastic breakfast. Unfortunately, after breakfast we have to pack up and head back to Pittsburgh. It will be another year until I visit Gettysburg again. There is something about that time I spend in Gettysburg that keeps me coming back to visit. For one, it is just a fun, relaxing time I get to spend with my family. This trip also fulfills my love for the outdoors. From sitting by the campfire and falling asleep to the chirp of the crickets, that is my definition of a perfect weekend. Gettysburg is also an interesting place to go for Civil War buffs like me. While walking down the Union line or walking Pickett's Charge, I imagine how the battle would have been played out around me. Every year when I visit Gettysburg, I learn more facts and stories about the battle, soldiers and generally about the Civil War. While I am in Gettysburg, I am perfectly content, passionate about the history and just enjoying the great outdoors with my family. 
# This drive to learn goes beyond just my passion for history but applies to all of the math, science and business classes I have taken and clubs I am involved in at school. Every day, I am genuinely excited to learn.
# """
# test = mbti_classify(my_posts)
# print ('check')
# test
# print ('check2')
|
[
"noreply@github.com"
] |
kwangilkimkenny.noreply@github.com
|
32a4734ac9c9ad913746b714ccef73833c5fc842
|
05c405652de52ada1b39b313f49d697ec9e23789
|
/DataMining/Task5/Ex2. StringGenerate.py
|
2511e76acdb32c2df7d9eb480c867f6d7d418d8b
|
[] |
no_license
|
LukichevaPolina/2nd-cource
|
32f149cd94fdea239fe5193e16f7c4718430c771
|
1b7d82870c5079d93a3faf6d58d8287964c3c5c3
|
refs/heads/main
| 2023-07-16T06:58:39.331027
| 2021-08-25T18:23:25
| 2021-08-25T18:23:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
import random
def StringGenerate(string, size, repetition):
if not repetition and (size > len(string)):
return 'Error: unable to compose a string!'
result = ''
if repetition:
for i in range(size):
result = result + string[random.randint(0, len(string) - 1)]
else:
for i in range(size):
i = random.randint(0, len(string) - 1)
result = result + string[i]
string = string.replace(string[i], '', 1)
return result
print(StringGenerate('asaaaqwe', 7, False))
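# With repetition allowed, the result may reuse characters and exceed the
# number of distinct letters, e.g. StringGenerate('abc', 5, True) might
# return something like 'cabba'.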
|
[
"63358667+LukichevaPolina@users.noreply.github.com"
] |
63358667+LukichevaPolina@users.noreply.github.com
|
354cd069b9195ce2cabedf5b537fbef6f1713e6b
|
8c7b03f24517e86f6159e4d74c8528bfbcbf31af
|
/test/python_api/lldbutil/frame/TestFrameUtils.py
|
04d398bc5fa1b95d457aa1aaae5bd15ded01ab94
|
[
"NCSA"
] |
permissive
|
markpeek/lldb
|
f849567fbd7791be10aacd41be44ee15f1a4fdc4
|
58c8d5af715a3da6cbb7e0efc6905e9d07410038
|
refs/heads/master
| 2021-01-15T17:01:57.014568
| 2011-12-24T01:08:58
| 2011-12-24T01:08:58
| 3,042,888
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
"""
Test utility functions for the frame object.
"""
import os
import unittest2
import lldb
from lldbtest import *
class FrameUtilsTestCase(TestBase):
mydir = os.path.join("python_api", "lldbutil", "frame")
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c',
"// Find the line number here.")
@python_api_test
def test_frame_utils(self):
"""Test utility functions for the frame object."""
self.buildDefault()
self.frame_utils()
def frame_utils(self):
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation("main.c", self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(None, None, os.getcwd())
if not process:
self.fail("SBTarget.LaunchProcess() failed")
self.assertTrue(process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
import lldbutil
thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
frame0 = thread.GetFrameAtIndex(0)
frame1 = thread.GetFrameAtIndex(1)
parent = lldbutil.get_parent_frame(frame0)
self.assertTrue(parent and parent.GetFrameID() == frame1.GetFrameID())
frame0_args = lldbutil.get_args_as_string(frame0)
parent_args = lldbutil.get_args_as_string(parent)
self.assertTrue(frame0_args and parent_args and "(int)val=1" in frame0_args)
if self.TraceOn():
lldbutil.print_stacktrace(thread)
print "Current frame: %s" % frame0_args
print "Parent frame: %s" % parent_args
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
[
"mark@peek.org"
] |
mark@peek.org
|
4d1a204cf4c627c98d424749964ad2314e75c6ba
|
d0df2a7b54862dbd76b37536fef2d44cd6e6d1aa
|
/RpiHMI/function.py
|
49bf5c1cb3af0484ad3b59c00950f497e1b16143
|
[] |
no_license
|
eigger/RpiHMI
|
bf40cbe462b73af8a99a0c7d8c4ba50966360d15
|
93c3f4a0f7dc73ac73b08881966983f08920514a
|
refs/heads/main
| 2023-04-04T00:54:15.222254
| 2021-04-12T17:17:37
| 2021-04-12T17:17:37
| 354,900,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import threading
import time
def asyncf(func, *args):
    # Run func(*args) on a daemon thread without blocking the caller.
    thread = threading.Thread(target=func, args=args)
    thread.daemon = True
    thread.start()
def timerf(interval, func, *args):
    # Call func(*args) once after `interval` seconds on a daemon timer thread.
    thread = threading.Timer(interval, func, args)
    thread.daemon = True
    thread.start()
if __name__ == '__main__':
print("start")
|
[
"eigger87@gmail.com"
] |
eigger87@gmail.com
|
18093775e36a02a55ed210d93ea5fe0eb5127ffc
|
3201b061fef61be0263f5401771d2ae86955af4a
|
/scrapyuniversal/scrapyuniversal/items.py
|
8369ddf907b111ac3b9bacecf5fca2b3cfd60014
|
[] |
no_license
|
echohsq/crawl_gouwu
|
2d56bd8b6eeb4036d0566a50b204f3c7ba16d8d5
|
491280b90bef2756a409b6173944fb4a4c685325
|
refs/heads/master
| 2023-08-03T05:47:39.599795
| 2021-09-22T10:29:19
| 2021-09-22T10:29:19
| 409,160,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Field, Item
class NewsItem(Item):
collection = table = 'tech_china'
title = Field()
url = Field()
text = Field()
datetime = Field()
source = Field()
website = Field()
|
[
"haoshengqiang@cnpc.com.cn"
] |
haoshengqiang@cnpc.com.cn
|
44ca2e8649630c0f338c6636d11ae3d772d89710
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03418/s842655187.py
|
e812523bc9e5891268bd0c4350311e175da8ddc3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# Count pairs (a, b) with 1 <= a, b <= N and a mod b >= K.
N,K=map(int,input().split())
a=0
for i in range(K+1,N+1):  # the divisor b must exceed K to allow a remainder >= K
    t=N//i                # complete remainder cycles of length i
    n=N-t*i               # leftover partial cycle
    a+=t*(i-K)            # each full cycle contributes the residues K..i-1
    if K:
        a+=max(0,n-K+1)   # residues K..n from the partial cycle
    else:
        a+=n              # K == 0: residues 1..n (residue 0 is already counted)
print(a)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
37127d5b8a6a6b24818d530c5047e948f726aa04
|
d2d7977d76d274ec43ee74d5f830e2d921d82425
|
/generate_waveforms_script_EOB.py
|
bf3119bfa20aa0221a64766dedd5a2d148dc7fd7
|
[] |
no_license
|
iphysresearch/GW_parameter_estimation
|
d337c0857e69c6e0f14ec48603165411b10b2014
|
1e990740a84ab8fd02c68b2d5fe02cfe93a0424e
|
refs/heads/main
| 2023-08-30T08:13:14.493859
| 2021-10-27T12:39:52
| 2021-10-27T12:39:52
| 389,299,013
| 3
| 0
| null | 2021-07-25T08:30:24
| 2021-07-25T08:30:23
| null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
import lfigw.waveform_generator as wfg
wfd = wfg.WaveformDataset(spins_aligned=False, domain='RB',
extrinsic_at_train=True)
wfd.Nrb = 600
wfd.approximant = 'SEOBNRv4P'
wfd.load_event('data/events/GW150914_10Hz/')
wfd.importance_sampling = 'uniform_distance'
wfd.prior['distance'] = [100.0, 1000.0]
wfd.prior['a_1'][1] = 0.88
wfd.prior['a_2'][1] = 0.88
print('Dataset properties')
print('Event', wfd.event)
print(wfd.prior)
print('f_min', wfd.f_min)
print('f_min_psd', wfd.f_min_psd)
print('f_max', wfd.f_max)
print('T', wfd.time_duration)
print('reference time', wfd.ref_time)
wfd.generate_reduced_basis(50000)
wfd.generate_dataset(1000000)
wfd.generate_noisy_test_data(5000)
wfd.save('../waveforms/GW150914_SEOBNRv4P')
wfd.save_train('../waveforms/GW150914_SEOBNRv4P')
wfd.save_noisy_test_data('../waveforms/GW150914_SEOBNRv4P')
print('Program complete. Waveform dataset has been saved.')
|
[
"hewang@mail.bnu.edu.cn"
] |
hewang@mail.bnu.edu.cn
|
ca5c45856918da31bc8db94f507be26f36d20cb6
|
dda51ce6178e0c74ac4de1f814796ae12c5a3b2a
|
/OE_Game.py
|
a28fe7f6feb621d4be5807b2c7670e80d5a52762
|
[] |
no_license
|
LiteBrick204/Odd_or_Even_Game_in_Python
|
8cb5837a14d676b0a7577e742811427cb67b4a97
|
8d851f0dfcd49c8bd7b6e38dd10f48fbba0e864c
|
refs/heads/master
| 2022-12-25T18:46:48.732418
| 2020-10-01T12:20:39
| 2020-10-01T12:20:39
| 295,441,155
| 8
| 1
| null | 2020-10-01T12:20:41
| 2020-09-14T14:26:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
#!/usr/bin/env python
import random
def toss(u):
    t = random.randint(0,1)
    g = "bat" if bool(t) else "bowl"
    if u==t:
        # The user called the coin correctly; the second element flags a toss win.
        return [int(input("You Won the Toss! Press 1 to bat and 2 to bowl:")), True]
    else:
        print("Computer won the toss and chose to",g)
        return [t+1, False]
def check(C,w):
if C=='n':
if not w:print("Thank you for playing")
quit()
elif C=='y' and w:
game()
class GameOver(Exception):
pass
def game():
score = 0
    C = input("Do you want to start(Y/N):").strip().lower()
check(C,0)
l = int(input("Call for toss! Press 0 for head and 1 for tail :"))
B = toss(l)
f = 1
c,u = 0,3
while c!=u and f==1 and C=='y':
try:
g = random.randint(1,6)
u = int(input(">>>"))
print("Computer Entered",g)
if u<=0 or u>6:
f = 2
raise ValueError
if u==g and B[0]==1:
print("YOU ARE OUT!!")
print("Your Score is",score)
f = -1
raise GameOver
elif u==g and B[0]==2:
print("The Computer is out!")
batround(score+1,1)
else:
                score+=u if B[0]==1 else g  # the batting side's number scores
continue
except GameOver:
if f==-1:
batround(score+1,0)
        except ValueError:
            if f==2:
                print("You can only enter numbers between 1-6 only")
            f=1
            continue
def batround(N,who):
print("%s need"%("You" if who else "Computer"),N,"runs to win the match!!")
u,c,f = 1,3,0
while N:
try:
g = random.randint(1,6)
u = int(input(">>>"))
print("Computer Entered",g)
if u<=0 or u>6:
f = 2
raise ValueError
elif u==g:
print("%s ARE OUT!!"%("You" if who else "Computer"))
print("Remaining Score is",N)
f = -1
raise GameOver
elif ((who and (N-u)<=0) or (not who and (N-g)<=0)) or N==0:
print("%s won the match!!" %("You" if who else "Computer"))
raise GameOver
elif (who and u in range(1,7)): N-=u
elif not who: N-=g
except ValueError:
if f==2:
print("You can only enter numbers between 1-6 only")
u=0
f=0
except GameOver:
print("GG!! Thank You for Playing with me.\nKudos and Godspeed!!!\n\t\t\t~Computer")
            check(input("Enter n to quit: ").strip().lower(),1)
game()
|
[
"user@xyz.in"
] |
user@xyz.in
|
6944997ed36b9516e6282fd746cfd8cb8533635c
|
9ad3803c73867cc66956c53aa0e0f023c2e9b7a9
|
/src/setup.py
|
1ccad261b1321dc0f81274ea679a66bfc3cf97f2
|
[] |
no_license
|
pcloth/api-shop
|
2f9d963971ab0eae8db7e48cfaac984ba104d079
|
6cae15e057172b33ec7d9b3317d033e7ceae71e1
|
refs/heads/master
| 2023-07-22T09:00:39.018633
| 2023-07-07T07:36:27
| 2023-07-07T07:36:27
| 165,461,139
| 43
| 4
| null | 2023-03-06T04:46:13
| 2019-01-13T03:21:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
import re, ast, pathlib
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('api_shop/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')
).group(1)))
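# For example, if api_shop/__init__.py contained `__version__ = '1.1.0'`, the
# regex would capture "'1.1.0'" and ast.literal_eval would yield '1.1.0'.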
setup(
name='api-shop',
version=version,
description=(
'RESTful api shop for django or flask or bottle'
),
long_description=pathlib.Path('README.MD').read_text(encoding='utf-8'),
long_description_content_type='text/markdown',
author='pcloth',
author_email='pcloth@gmail.com',
maintainer='pcloth',
maintainer_email='pcloth@gmail.com',
license='BSD License',
packages=find_packages(),
include_package_data=True,
exclude_package_date={'':['.gitignore']},
keywords=['api-shop', 'Flask-RESTful', 'Django REST framework', 'RESTful'],
platforms=["all"],
url='https://github.com/pcloth/api-shop',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Software Development :: Libraries'
],
)
|
[
"pcloth@gmail.com"
] |
pcloth@gmail.com
|
df508d4346a333c8b0c275896dd8c534e956fe0d
|
9bad4b4c20a6b26d96ac9e0c7a7587749121aa5f
|
/src/main/python/mlonspark/scaler.py
|
05fa9472f8bbf0334756b46a6e4d05728a00e87b
|
[] |
no_license
|
barenode/bp
|
2161fa2e02cbd0a48de6555a14a2816e8dc0b6ed
|
e2d279ff8dc21db2d23d0740ce0de0fb2e811c07
|
refs/heads/master
| 2022-12-26T08:11:32.215682
| 2020-05-30T03:51:34
| 2020-05-30T03:51:34
| 168,565,024
| 0
| 0
| null | 2020-10-13T11:52:28
| 2019-01-31T17:21:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
import sys
from pyspark import since, keyword_only
from pyspark.ml.param.shared import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer, _jvm, JavaParams
class ScalerModel(JavaModel):
_classpath_model = 'mlonspark.ScalerModel'
@staticmethod
def _from_java(java_stage):
"""
Given a Java object, create and return a Python wrapper of it.
Used for ML persistence.
Meta-algorithms such as Pipeline should override this method as a classmethod.
"""
# Generate a default new instance from the stage_name class.
py_type = ScalerModel
if issubclass(py_type, JavaParams):
# Load information from java_stage to the instance.
py_stage = py_type()
py_stage._java_obj = java_stage
py_stage._resetUid(java_stage.uid())
py_stage._transfer_params_from_java()
return py_stage
class Scaler(JavaEstimator, HasInputCol, HasOutputCol):
groupCol = Param(Params._dummy(), "groupCol", "groupCol", typeConverter=TypeConverters.toString)
_classpath = 'mlonspark.Scaler'
@keyword_only
def __init__(self):
super(Scaler, self).__init__()
self._java_obj = self._new_java_obj(
Scaler._classpath ,
self.uid
)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
def setParams(self):
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ScalerModel(java_model)
def setGroupCol(self, value):
return self._set(groupCol=value)
def getGroupCol(self):
return self.getOrDefault(self.groupCol)
def setOutputCol(self, value):
return self._set(outputCol=value)
def getOutputCol(self):
return self.getOrDefault(self.outputCol)
def setInputCol(self, value):
return self._set(inputCol=value)
def getInputCol(self):
return self.getOrDefault(self.inputCol)
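# Hypothetical usage sketch (assumes a running SparkSession and a DataFrame
# `df` containing the named columns; only the classes above are real):
#   scaler = Scaler().setInputCol("value").setOutputCol("scaled").setGroupCol("user")
#   model = scaler.fit(df)        # returns a ScalerModel wrapping the JVM model
#   result = model.transform(df)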
|
[
"frantisek.hylmar@gmail.com"
] |
frantisek.hylmar@gmail.com
|
2150af8db3f4f64b86685075d6ed96e3845861c3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_chauffeur.py
|
97f43e805d5aba06eb05a5fd2bd9c150fd38b7be
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# class header
class _CHAUFFEUR():
def __init__(self,):
self.name = "CHAUFFEUR"
self.definitions = [u'someone whose job is to drive a car for a rich or important person: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b293f33dad83407bdb8ddb7a4f4a6e9e35487745
|
43dc562227787ef5662327444362bfb17bec2eb0
|
/opt/app.py
|
cb86cb2f7055e5843be78bb91992e2b243ccee01
|
[] |
no_license
|
masafumi-tk/myhome-dash
|
1ff4572a1de571eefad3fac9bbc34a2b33c027c5
|
79cc4d0ca672c9a87de08d1bf7ab4e495c8b66a3
|
refs/heads/master
| 2023-08-13T09:41:29.605223
| 2021-09-14T13:02:15
| 2021-09-14T13:02:15
| 406,361,411
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
# -*- coding: utf-8 -*-
# Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
df = pd.DataFrame({
"Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
"Amount": [4, 1, 2, 2, 4, 5],
"City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})
fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
app.layout = html.Div(children=[
html.H1(children='Hello Dash'),
html.Div(children='''
Dash: A web application framework for Python.
'''),
dcc.Graph(
id='example-graph',
figure=fig
)
])
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"masafumi.cascata@gmail.com"
] |
masafumi.cascata@gmail.com
|
792117aef1b769f6243511b155a062c0569bca31
|
d916ab8c00fb99c1da73e7eb6363944e2debbace
|
/venv/bin/pip
|
396419667de67070583c2165df4ab927c128805a
|
[] |
no_license
|
apap26/a_helicopter_TRPO
|
fcc85a8f5ef188de9e71934a75a5d8951fe4ac56
|
974e675cc73379b36adccd290f38d568a5acdc94
|
refs/heads/master
| 2023-01-12T14:30:19.430020
| 2020-11-11T21:00:33
| 2020-11-11T21:00:33
| 296,840,478
| 2
| 2
| null | 2020-11-06T17:14:09
| 2020-09-19T10:13:02
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 401
|
#!/home/apap26/PycharmProjects/magazin/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"apap26@yandex.ru"
] |
apap26@yandex.ru
|
|
0afbaee6a3d11e935314a77c986fca4852eeb54e
|
d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1
|
/rosalind/bioinformatics/stronghold/tran/main.py
|
131ebf08d6ecc2fe6278e7b4127c11468845a825
|
[] |
no_license
|
dswisher/rosalind
|
d6af5195cdbe03adb5a19ed60fcbf8c05beac784
|
4519740350e47202f7a45ce70e434f7ee15c6afc
|
refs/heads/master
| 2021-08-09T02:58:17.131164
| 2017-11-12T01:26:26
| 2017-11-12T01:26:26
| 100,122,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import sys
from rosalind.common import util
from rosalind.bioinformatics.common import fasta
def compute_ratio(seq1, seq2):
transitions = set(['AG', 'GA', 'CT', 'TC'])
transversions = set(['AC', 'CA', 'GT', 'TG', 'AT', 'TA', 'CG', 'GC'])
numTransitions = 0
numTransversions = 0
for i in xrange(len(seq1)):
x = seq1[i] + seq2[i]
if x in transitions:
numTransitions += 1
elif x in transversions:
numTransversions += 1
return float(numTransitions) / numTransversions
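# Example: comparing 'ACGT' with 'GCTA' position by position gives one
# transition ('AG') and two transversions ('GT', 'TA'), so the ratio is 0.5.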
def main(fname):
seqs, _ = fasta.read(util.find_file(fname))
if len(seqs[0]) != len(seqs[1]):
print "Sequences have different lengths!"
sys.exit(1)
print compute_ratio(seqs[0], seqs[1])
if __name__ == '__main__':
if len(sys.argv) != 2:
print ("You must specify the name of the data file to load!")
sys.exit(1)
main(sys.argv[1])
|
[
"big.swish@gmail.com"
] |
big.swish@gmail.com
|
8cdf3553533329f2a092d4fab6b2c1d67fed1f09
|
b7229cf95d4d348a3ef367ea99ecd7c8548c9000
|
/BonsaiCollection/Bonsai.py
|
d202f710599b253bc48d03f9ad0ee96ae9f0c0ec
|
[] |
no_license
|
ngoubaux/bonsaicollection
|
ee883a7afde9a7d13f68c23f704a8b7c346638a3
|
8707ea3d7d818992a012e10a1faed774e97ec2d0
|
refs/heads/master
| 2021-01-10T19:46:47.751766
| 2008-02-18T11:43:14
| 2008-02-18T11:43:14
| 32,511,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,622
|
py
|
# -*- coding: utf-8 -*-
#
# File: Bonsai.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 1.5.2
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
__author__ = """unknown <unknown>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from Products.ATVocabularyManager.namedvocabulary import NamedVocabulary
from Products.BonsaiCollection.config import *
# additional imports from tagged value 'import'
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
from random import choice
##code-section module-header #fill in your manual code here
##/code-section module-header
schema = Schema((
StringField(
name='origin',
widget=SelectionWidget(
label='Origin',
label_msgid='BonsaiCollection_label_origin',
i18n_domain='BonsaiCollection',
),
vocabulary=NamedVocabulary("""BonsaiOrigin""")
),
DateTimeField(
name='acquiredDate',
widget=CalendarWidget(
show_hm=False,
label='Acquireddate',
label_msgid='BonsaiCollection_label_acquiredDate',
i18n_domain='BonsaiCollection',
)
),
ImageField(
name='project',
widget=ImageWidget(
label='Project',
label_msgid='BonsaiCollection_label_project',
i18n_domain='BonsaiCollection',
),
storage=AttributeStorage(),
sizes={'thumb':(80,80), 'normal' : (200,200)}
),
TextField(
name='remark',
widget=TextAreaWidget(
label='Remark',
label_msgid='BonsaiCollection_label_remark',
i18n_domain='BonsaiCollection',
)
),
ReferenceField(
name='container',
allowed_content_types="Container",
widget=ReferenceBrowserWidget(
startup_directory="../",
label='Container',
label_msgid='BonsaiCollection_label_container',
i18n_domain='BonsaiCollection',
),
allowed_types=('Container',),
multiValued=0,
relationship='in container'
),
ReferenceField(
name='treestyles',
widget=ReferenceWidget(
label='Treestyles',
label_msgid='BonsaiCollection_label_treestyles',
i18n_domain='BonsaiCollection',
),
allowed_types=('TreeStyle',),
multiValued=0,
relationship='as style'
),
ReferenceField(
name='species',
widget=ReferenceWidget(
label='Species',
label_msgid='BonsaiCollection_label_species',
i18n_domain='BonsaiCollection',
),
allowed_types=('Specie',),
multiValued=0,
relationship='is specie'
),
ReferenceField(
name='CurrentView',
allowed_content_types="ATPhoto",
widget=ReferenceBrowserWidget(
startup_directory=".",
label='Currentview',
label_msgid='BonsaiCollection_label_CurrentView',
i18n_domain='BonsaiCollection',
),
allowed_types=('ATPhoto',),
multiValued=0,
relationship='looks like'
),
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
Bonsai_schema = BaseFolderSchema.copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class Bonsai(BaseFolder):
"""
"""
security = ClassSecurityInfo()
__implements__ = (getattr(BaseFolder,'__implements__',()),)
# This name appears in the 'add' box
archetype_name = 'Bonsaï'
meta_type = 'Bonsai'
portal_type = 'Bonsai'
allowed_content_types = ['ATPhotoAlbum', 'BonsaiDimension', 'BonsaiEventTreatment', 'BonsaiEventWork']
filter_content_types = 1
global_allow = 0
content_icon = 'bonsai.gif'
immediate_view = 'base_view'
default_view = 'base_view'
suppl_views = ()
typeDescription = "Bonsaï"
typeDescMsgId = 'description_edit_bonsai'
actions = (
{'action': "string:${object_url}/bonsai_view",
'category': "object",
'id': 'view',
'name': 'View',
'permissions': ("View",),
'condition': 'python:1'
},
{'action': "string:${object_url}/works_view",
'category': "object",
'id': 'works_view',
'name': 'travaux',
'permissions': ("View",),
'condition': 'python:1'
},
{'action': "string:${object_url}/illnesses_view",
'category': "object",
'id': 'illnesses_view',
'name': 'traitements',
'permissions': ("View",),
'condition': 'python:1'
},
{'action': "string:${object_url}/gallery_view",
'category': "object",
'id': 'gallery_view',
'name': 'Photo albums',
'permissions': ("View",),
'condition': 'python:1'
},
{'action': "string:${object_url}/evolution_view",
'category': "object",
'id': 'evolution_view',
'name': 'Evolution',
'permissions': ("View",),
'condition': 'python:1'
},
)
_at_rename_after_creation = True
schema = Bonsai_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
security.declarePublic('getPicture')
def getPicture(self):
"""
return the referenced photo or try to get one
"""
        refs = self.getReferenceImpl(relationship='looks like')
if len(refs) > 0 :
return refs[0].getTargetObject()
else:
results = self.portal_catalog.searchResults(portal_type='ATPhoto', path='/'.join(self.getPhysicalPath()))
if len(results) > 0:
return choice(results).getObject()
pass
security.declarePublic('getEncyclopedie')
def getEncyclopedie(self):
"""
"""
results = self.portal_catalog.searchResults(portal_type='Encyclopedia')
if len(results) > 0:
return results[0].getObject()
pass
security.declarePublic('getSpecies')
def getSpecies(self):
"""
"""
results = self.portal_catalog.searchResults(portal_type='SpecieVolume')
if len(results) > 0:
return results[0].getObject()
pass
security.declarePublic('getPots')
def getPots(self):
"""
"""
results = self.aq_parent.getFolderContents(contentFilter ={'portal_type' : ['Containers']})
if len(results) > 0:
return results[0].getObject()
pass
registerType(Bonsai, PROJECTNAME)
# end of class Bonsai
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
[
"goubsi@4b32b131-8944-0410-a794-395d361ccd7d"
] |
goubsi@4b32b131-8944-0410-a794-395d361ccd7d
|
7b1dfacee784f7b05375188302ab051e0b005603
|
ad28a59209239be285d1127a87bc08893fb62cb9
|
/python/aad/test_concept_drift_classifier.py
|
76ebbe63d04d5f1a6934a18ec97cdc667445b71c
|
[
"MIT"
] |
permissive
|
Karl-Wu/ad_examples
|
9e6f894c2414640b23ccdeb39db9e9b8352ef077
|
6fb0a2a72db51d82645e377945327eb9e1ecf8b8
|
refs/heads/master
| 2020-03-26T19:33:45.128414
| 2018-08-17T21:42:15
| 2018-08-17T21:42:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,961
|
py
|
from aad.data_stream import *
from common.gen_samples import read_anomaly_dataset
from aad.anomaly_dataset_support import *
from aad.classifier_trees import RandomForestAadWrapper
"""
Check data drift with a Random Forest classifier.
NOTE: The classifier is trained only once in this example with the
first window of data. The drift is tested for the rest of the
windows *without* updating the model.
To run:
pythonw -m aad.test_concept_drift_classifier --debug --plot --log_file=temp/test_concept_drift_classifier.log --dataset=weather
"""
def test_kl_data_drift_classifier():
logger = logging.getLogger(__name__)
args = get_command_args(debug=False)
configure_logger(args)
dataset_config = dataset_configs[args.dataset]
stream_window = dataset_config[2]
alpha = 0.05
n_trees = 100
X_full, y_full = read_anomaly_dataset(args.dataset)
logger.debug("dataset: %s (%d, %d), stream_window: %d, alpha: %0.3f" %
(args.dataset, X_full.shape[0], X_full.shape[1], stream_window, alpha))
stream = DataStream(X_full, y_full, IdServer(initial=0))
# get first window of data
training_set = stream.read_next_from_stream(stream_window)
x, y, ids = training_set.x, training_set.y, training_set.ids
logger.debug("First window loaded (%s): %d" % (args.dataset, x.shape[0]))
# train classifier with the window of data
rf = RFClassifier.fit(x, y, n_estimators=n_trees)
logger.debug("Random Forest classifier created with %d trees" % rf.clf.n_estimators)
# prepare wrapper over the classifier which will compute KL-divergences
# NOTE: rf.clf is the scikit-learn Random Forest classifier instance
model = RandomForestAadWrapper(x=x, y=y, clf=rf.clf)
logger.debug("Wrapper model created with %d nodes" % len(model.w))
# compute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
# now initialize reference p
p = model.get_node_sample_distributions(x)
window = 0
while not stream.empty():
window += 1
# get next window of data and check KL-divergence
training_set = stream.read_next_from_stream(n=stream_window)
x, y = training_set.x, training_set.y
logger.debug("window %d loaded: %d" % (window, x.shape[0]))
# compare KL-divergence of current data dist against reference dist p
comp_kls, _ = model.get_KL_divergence_distribution(x, p=p)
# find which trees exceed alpha-level threshold
trees_exceeding_kl_q_alpha = model.get_trees_to_replace(comp_kls, kl_q_alpha)
n_threshold = int(2 * alpha * n_trees)
logger.debug("[%d] #trees_exceeding_kl_q_alpha: %d, threshold number of trees: %d\n%s" %
(window, len(trees_exceeding_kl_q_alpha), n_threshold, str(list(trees_exceeding_kl_q_alpha))))
if __name__ == "__main__":
test_kl_data_drift_classifier()
|
[
"smd.shubhomoydas@gmail.com"
] |
smd.shubhomoydas@gmail.com
|