| code (string, 1–1.05M chars) | repo_name (string, 6–83 chars) | path (string, 3–242 chars) | language (222 classes) | license (20 classes) | size (int64, 1–1.05M) |
|---|---|---|---|---|---|
<!DOCTYPE html>
<html lang="cn">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<div>Hello Cangjie!</div>
<p></p>
<script>
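// Send a JSON POST to /Hello via XMLHttpRequest and append the returned msg to the page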
let xhr = new XMLHttpRequest()
xhr.open("POST", "/Hello", true)
xhr.onreadystatechange = () => {
if(xhr.readyState == 4 && xhr.status == 200){
let res = JSON.parse(xhr.responseText)
document.body.innerHTML += `<div>${res.msg}</div>`
}
}
xhr.send(JSON.stringify({
name: "Chen",
age: 999
}))
</script>
</body>
</html>
| repo_name: 2301_80674151/Cangjie-Examples_4666 | path: HTTPServer/index.html | language: HTML | license: apache-2.0 | size: 687 |
<!doctype html>
<meta charset="utf-8">
<title>登录 / 借阅系统</title>
<style>
body{font-family:Arial,Helvetica,sans-serif;font-size:14px}
table{border-collapse:collapse;margin-top:6px}
td,th{border:1px solid #999;padding:4px 8px}
button{margin:0 2px;padding:2px 6px}
.hide{display:none}
input[type=text],input[type=number]{width:120px;margin-right:4px}
</style>
<form id="f">
<input name="u" placeholder="用户名" required>
<input name="p" type="password" placeholder="密码" required>
<button type="submit" id="btn">登录</button>
</form>
<a href="#" id="toggle">切换注册</a>
<div id="info" class="hide">
欢迎 <span id="username"></span>(ID:<span id="uid"></span>) 权限:<span id="perm"></span>
</div>
<!-- Admin panel -->
<div id="adminPanel" class="hide">
<h3>管理员操作</h3>
<div>
<input id="newTitle" placeholder="书名">
<input id="newAuthor" placeholder="作者">
<button onclick="addBook()">添加书籍</button>
</div>
<div style="margin-top:4px">
<input id="updBookId" type="number" placeholder="书籍ID">
<input id="updStockValue" type="number" placeholder="新库存">
<button onclick="updStock()">修改库存</button>
</div>
</div>
<h3>全部书籍</h3>
<table id="bookTable">
<thead>
<tr><th>ID</th><th>书名</th><th>作者</th><th>库存</th><th>操作</th></tr>
</thead>
<tbody></tbody>
</table>
<h3 id="borrowTitle" class="hide">借阅记录</h3>
<table id="borrowTable" class="hide">
<thead>
<tr>
<th>借阅ID</th><th>书名</th><th>作者</th><th>用户名</th>
<th>借阅日期</th><th>归还日期</th><th>操作</th>
</tr>
</thead>
<tbody></tbody>
</table>
<script>
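// Library-lending client: login/register, list books, borrow/return, and admin stock management against the REST API at `base`.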
const base = 'http://192.168.232.129:8080';
const shortDate = str => str ? str.split('T')[0] : '';
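// Tiny fetch helpers: get() parses a JSON response, post() returns the response as plain text.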
async function get(url){return fetch(url).then(r=>r.json())}
async function post(url){return fetch(url,{method:'POST'}).then(r=>r.text())}
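// Note: elements with ids (f, btn, toggle, info, bookTable, ...) are referenced below via the browser's implicit id-to-global mapping.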
let isLogin = true, curUser = null;
toggle.onclick=()=>{
isLogin=!isLogin;
btn.textContent=isLogin?'登录':'注册';
toggle.textContent=isLogin?'切换注册':'切换登录';
};
f.onsubmit=async e=>{
e.preventDefault();
const {u,p}=f;
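// Named controls are available as properties of the form element (f.u, f.p).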
if(isLogin){
const loginTxt=await post(`${base}/users/loginByName/?username=${encodeURIComponent(u.value)}&userpassword=${encodeURIComponent(p.value)}`);
if(loginTxt!=='Login successful'){alert(loginTxt);return;}
curUser=await get(`${base}/users/byName/?username=${encodeURIComponent(u.value)}`);
}else{
const regTxt=await post(`${base}/users/?username=${encodeURIComponent(u.value)}&userpassword=${encodeURIComponent(p.value)}&permission=normal`);
alert(regTxt);
if(regTxt!=='User registered successfully')return;
curUser=await get(`${base}/users/byName/?username=${encodeURIComponent(u.value)}`);
}
username.textContent=curUser.username;
uid.textContent=curUser.id;
perm.textContent=curUser.permission;
info.classList.remove('hide');
if(curUser.permission==='admin') adminPanel.classList.remove('hide');
await refreshAll();
f.reset();
};
async function refreshAll(){
await loadBooks();
await loadBorrowRecords();
}
async function loadBooks(){
const books=await get(`${base}/books/all`);
const tbody=bookTable.querySelector('tbody');
tbody.innerHTML='';
books.forEach(b=>{
const canBorrow=+b.stock>0;
tbody.insertAdjacentHTML('beforeend',
`<tr>
<td>${b.id}</td><td>${b.title}</td><td>${b.author}</td><td>${b.stock}</td>
<td>
${curUser ? `<button onclick="borrow(${b.id})" ${canBorrow?'':'disabled'}>借书</button>` : ''}
</td>
</tr>`);
});
}
async function loadBorrowRecords(){
let records;
if(curUser.permission==='admin'){
records=await get(`${base}/borrow-records`);
}else{
records=await get(`${base}/borrow-records/byUserId/?user_id=${curUser.id}`);
}
const tbody=borrowTable.querySelector('tbody');
tbody.innerHTML='';
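// Look up book and user details for each record (an N+1 request pattern; fine at this scale)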
for(const r of records){
const [book,user]=await Promise.all([
get(`${base}/books/byId/?id=${r.book_id}`),
get(`${base}/users/byId/?id=${r.user_id}`)
]);
const canReturn=curUser.permission==='admin' || r.user_id===curUser.id;
const returned=r.return_date && r.return_date!=='null';
tbody.insertAdjacentHTML('beforeend',
`<tr>
<td>${r.id}</td><td>${book.title}</td><td>${book.author}</td><td>${user.username}</td>
<td>${shortDate(r.borrow_date)}</td>
<td>${returned?shortDate(r.return_date):'未归还'}</td>
<td>
${canReturn && !returned
? `<button onclick="returnBook(${r.id})">还书</button>` : ''}
</td>
</tr>`);
}
borrowTitle.classList.remove('hide');
borrowTable.classList.remove('hide');
}
async function borrow(bookId){
const txt=await post(`${base}/borrow-records/borrow?user_id=${curUser.id}&book_id=${bookId}`);
alert(txt);
await refreshAll();
}
async function returnBook(recordId){
const txt=await post(`${base}/borrow-records/return?record_id=${recordId}`);
alert(txt);
await refreshAll();
}
async function addBook(){
const title=newTitle.value.trim(),author=newAuthor.value.trim();
if(!title||!author){alert('请输入书名和作者');return;}
const txt=await post(`${base}/books?title=${encodeURIComponent(title)}&author=${encodeURIComponent(author)}`);
alert(txt);
newTitle.value=''; newAuthor.value='';
await refreshAll();
}
async function updStock(){
const bookId=updBookId.value,stock=updStockValue.value;
if(!bookId||stock===''){alert('请输入书籍ID和新库存');return;}
const txt=await post(`${base}/books/updateStock/?book_id=${bookId}&new_stock=${stock}`);
alert(txt);
updBookId.value=''; updStockValue.value='';
await refreshAll();
}
loadBooks();
</script>
| repo_name: 2301_79679684/JIT-Cangjie-examples | path: hanzongao/web/index.html | language: HTML | license: apache-2.0 | size: 5,699 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
<link rel="stylesheet" href="./main.css">
</head>
<body>
<h1>Coffee Order</h1>
<ul id="orders">
<!-- <li class="order" id="1">
<p>
Name:
<span class="name">John</span>
<input type="text" class="name nonedit">
</p>
<p>
Drink:
<span class="drink">Coffee</span>
<input type="text" class="drink nonedit">
</p>
<button data-id="1" class="delete-order">Delete</button>
<button class="edit-order">Edit</button>
<button class="save-edit nonedit">Save</button>
<button class="cancel-edit nonedit">Cancel</button>
</li>
<li class="order" id="2">
<p>
Name:
<span class="name">Emily</span>
<input type="text" class="name nonedit">
</p>
<p>
Drink:
<span class="drink">Mocha</span>
<input type="text" class="drink nonedit">
</p>
<button data-id="2" class="delete-order">Delete</button>
<button class="edit-order">Edit</button>
<button class="save-edit nonedit">Save</button>
<button class="cancel-edit nonedit">Cancel</button>
</li> -->
</ul>
<h4>Add a Coffee Order</h4>
<p>name: <input type="text" id="name"></p>
<p>drink: <input type="text" id="drink"></p>
<button id="add-order">Add!</button>
<script src="./jquery-3.5.1.js"></script>
<script src="./script.js"></script>
</body>
</html>
| repo_name: 2301_79679684/JIT-Cangjie-examples | path: songqvlv/CoffeeOrder/src/public/index.html | language: HTML | license: apache-2.0 | size: 1,811 |
* {
padding: 0;
margin: 10px;
}
#orders {
list-style-type: none;
margin: 0;
}
#orders .order {
background-color: #c4c4c4;
padding: 5px;
}
#orders .nonedit {
display: none;
}
#orders .edit {
display: none;
}
button {
padding: 5px 10px;
}
| repo_name: 2301_79679684/JIT-Cangjie-examples | path: songqvlv/CoffeeOrder/src/public/main.css | language: CSS | license: apache-2.0 | size: 277 |
$(function () {
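// Coffee-order client: fetch existing orders on load, append new ones via JSON POST.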
let $name = $('#name')
let $drink = $('#drink')
let $orders = $('#orders')
function addOrder(order) {
$orders.append(`<li class="order" id="${order.id}">
<p>
Name: <span class="name">${order.name}</span>
<input type="text" class="name nonedit">
</p>
<p>
Drink: <span class="drink">${order.drink}</span>
<input type="text" class="drink nonedit">
</p>
<!--<button data-id="${order.id}" class="delete-order">Delete</button>
<button data-id="${order.id}" class="edit-order">Edit</button>
<button data-id="${order.id}" class="save-edit nonedit">Save</button>
<button data-id="${order.id}" class="cancel-edit nonedit">Cancel</button>-->
</li>`)
}
$.ajax({
url: '/orders',
type: 'GET',
dataType: 'json',
success: function (data) {
console.log(data)
//let orders = JSON.parse(data) — data here is already a JS object (an array of order objects)
$.each(data, function (index, order) {
addOrder(order)
})
},
error: function () {
alert('error in GET')
}
})
$('#add-order').on('click', function () {
if($name.val() === '' || $drink.val() === ''){
alert('Please input name and drink')
return
}
$.ajax({
url: '/orders',
type: 'POST',
dataType: 'json', // type of data the server returns
contentType: 'application/json', // type of data being sent
data: JSON.stringify({
name: $name.val(),
drink: $drink.val()
}),
success: function (data) {
console.log(data)
addOrder(data)
},
error: function () {
alert('error in POST')
}
})
// clear the input fields
$name.val('')
$drink.val('')
})
/* $orders.delegate('.delete-order', 'click', function () {
$li = $(this).closest('.order')
$.ajax({
url: `/orders/${$li.attr('id')}`,
type: 'DELETE',
success: function () {
$li.slideUp(300, function () {
$li.remove() // remove the element from the DOM; slideUp() only hides it with display:none
})
},
error: function () {
alert('error in DELETE')
}
})
})
$orders.delegate('.edit-order', 'click', function () {
$li = $(this).closest('.order')
$li.find('input.name').val($li.find('span.name').text())
$li.find('input.drink').val($li.find('span.drink').text())
$li.find('span,.edit-order').addClass('edit')
$li.find('.nonedit').removeClass('nonedit')
})
$orders.delegate('.cancel-edit', 'click', function () {
$li = $(this).closest('.order')
$li.find('span,.edit-order').removeClass('edit')
$li.find('input,.save-edit,.cancel-edit').addClass('nonedit')
})
$orders.delegate('.save-edit', 'click', function () {
$li = $(this).closest('.order')
$.ajax({
url: `/orders/${$li.attr('id')}`,
type: 'PUT',
dataType: 'json',
contentType: 'application/json',
data: JSON.stringify({
name: $li.find('input.name').val(),
drink: $li.find('input.drink').val()
}),
success: function () {
$li.find('span.name').text($li.find('input.name').val())
$li.find('span.drink').text($li.find('input.drink').val())
$li.find('span,.edit-order').removeClass('edit')
$li.find('input,.save-edit,.cancel-edit').addClass('nonedit')
},
error: function () {
alert('error in PUT')
}
})
})*/
})
| repo_name: 2301_79679684/JIT-Cangjie-examples | path: songqvlv/CoffeeOrder/src/public/script.js | language: JavaScript | license: apache-2.0 | size: 4,073 |
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>与鲨鱼AI对话</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: Arial, sans-serif;
min-height: 100vh;
background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%);
display: flex;
flex-direction: column;
}
.header {
background: rgba(0,0,0,0.3);
padding: 15px 20px;
display: flex;
align-items: center;
gap: 15px;
}
.header img {
width: 50px;
height: 50px;
border-radius: 50%;
}
.header h1 { color: white; font-size: 20px; }
.back-btn {
margin-left: auto;
padding: 8px 20px;
background: #667eea;
color: white;
border: none;
border-radius: 20px;
cursor: pointer;
text-decoration: none;
}
.back-btn:hover { background: #5a6fd6; }
.chat-container {
flex: 1;
overflow-y: auto;
padding: 20px;
display: flex;
flex-direction: column;
gap: 15px;
}
.message {
max-width: 70%;
padding: 12px 18px;
border-radius: 18px;
line-height: 1.5;
}
.message.user {
align-self: flex-end;
background: #667eea;
color: white;
border-bottom-right-radius: 5px;
}
.message.ai {
align-self: flex-start;
background: rgba(255,255,255,0.9);
color: #333;
border-bottom-left-radius: 5px;
}
.input-container {
padding: 15px 20px;
background: rgba(0,0,0,0.3);
display: flex;
gap: 10px;
}
#userInput {
flex: 1;
padding: 12px 18px;
border: none;
border-radius: 25px;
font-size: 16px;
outline: none;
}
#sendBtn {
padding: 12px 25px;
background: #667eea;
color: white;
border: none;
border-radius: 25px;
cursor: pointer;
font-size: 16px;
}
#sendBtn:hover { background: #5a6fd6; }
.typing {
display: none;
align-self: flex-start;
background: rgba(255,255,255,0.9);
padding: 12px 18px;
border-radius: 18px;
color: #666;
}
.typing.show { display: block; }
</style>
</head>
<body>
<div class="header">
<img src="shark.png" alt="鲨鱼">
<h1>🦈 鲨鱼小鲨 - AI助手</h1>
<a href="choose.html" class="back-btn">返回游戏</a>
</div>
<div class="chat-container" id="chatContainer">
<div class="message ai">嗨嗨!我是小鲨🦈 超开心见到你!今天想聊点什么呀?我可喜欢聊天啦~</div>
</div>
<div class="typing" id="typing">小鲨正在思考...</div>
<div class="input-container">
<input type="text" id="userInput" placeholder="和小鲨聊聊天..." autocomplete="off">
<button id="sendBtn">发送</button>
</div>
<script>
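// Shark chat client: replies come from the local fish knowledge base, then keyword rules, then the DeepSeek proxy, in that order.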
const chatContainer = document.getElementById('chatContainer');
const userInput = document.getElementById('userInput');
const sendBtn = document.getElementById('sendBtn');
const typing = document.getElementById('typing');
// Shark personality settings (reference data; not referenced directly below)
const sharkPersonality = {
name: '小鲨',
likes: ['小鱼', '游泳', '聊天', '交朋友', '大海', '珊瑚礁'],
dislikes: ['可乐瓶', '塑料袋', '垃圾', '污染'],
traits: '活泼开朗、热情友好、话多爱聊'
};
// Keyword reply library
const keywordResponses = {
'你好|嗨|hi|hello': [
'嗨嗨!见到你太开心啦!🦈',
'哈喽哈喽~今天心情怎么样呀?',
'嘿!终于等到你来找我聊天啦!',
'你好呀!我是小鲨,超级高兴认识你!'
],
'名字|叫什么': [
'我叫小鲨呀!是一只超级活泼的小鲨鱼🦈',
'小鲨小鲨,记住我哦!以后常来找我玩~',
'我是小鲨!大海里最爱聊天的鲨鱼就是我啦!'
],
'吃什么|喜欢吃|食物|饿': [
'我超爱吃小鱼!沙丁鱼、鲱鱼、鲭鱼都好好吃😋',
'小鱼小鱼!我最喜欢追着小鱼群游泳然后大口吃掉它们!',
'说到吃的我就兴奋!新鲜的小鱼是我的最爱~',
'金枪鱼也不错哦!肉质鲜美,吃完感觉充满力量!'
],
'可乐|塑料|垃圾|污染': [
'呜呜...别提可乐瓶了,我超讨厌那些东西!😢',
'垃圾真的很可怕...我好多朋友都因为误食塑料生病了',
'拜托拜托,不要往海里扔垃圾好不好?我们鲨鱼会很难过的...',
'可乐瓶?不不不!那个吃了会死掉的!我才不要!'
],
'大海|海洋|游泳': [
'大海是我的家!我每天都在里面畅游,超级自由~🌊',
'游泳是我最擅长的!要不要比比看谁游得快?哈哈开玩笑的~',
'海洋里有好多好玩的地方!珊瑚礁、海底洞穴、还有好多小伙伴!',
'我最喜欢在温暖的海水里游来游去,感觉整个世界都是我的!'
],
'朋友|孤单|寂寞': [
'我有好多朋友哦!海龟爷爷、小丑鱼弟弟、还有海豚姐姐~',
'你也是我的朋友呀!以后常来找我聊天好不好?',
'在大海里永远不会孤单!到处都是可爱的小伙伴~',
'有你陪我聊天我超开心的!谢谢你来找我玩!'
],
'开心|高兴|快乐': [
'耶!开心最重要啦!我每天都超级快乐~🎉',
'看到你开心我也好开心!快乐是会传染的哦!',
'哈哈哈!和你聊天让我心情更好了!',
'保持好心情!像我一样每天都笑嘻嘻的~'
],
'难过|伤心|不开心': [
'怎么啦?有什么不开心的可以告诉小鲨哦~',
'别难过别难过!我给你表演一个转圈圈!🔄',
'抱抱!虽然我是鲨鱼不能真的抱你,但是心意到了!',
'不开心的时候就来找我聊天吧!我会一直陪着你的~'
],
'厉害|强|牛': [
'嘿嘿,被夸了有点不好意思~其实我就是一只普通的小鲨鱼啦!',
'谢谢夸奖!你也很厉害哦!',
'哈哈哈我确实挺厉害的!游泳第一名!(自夸中)'
],
'再见|拜拜|bye': [
'啊要走了吗?下次一定要再来找我玩哦!拜拜~👋',
'拜拜!我会想你的!记得常来看我!',
'再见啦!祝你每天都开开心心的!🦈💕',
'下次见!我在大海里等你哦~'
]
};
// Generic upbeat fallback replies
const generalResponses = [
'哇哦!这个问题好有趣!让我想想...嗯...我觉得挺棒的!',
'嘿嘿,你说的我都有在认真听哦!继续继续~',
'真的吗真的吗?好想知道更多!快告诉我!',
'哈哈哈!和你聊天太开心了!我尾巴都摇起来了~',
'嗯嗯嗯!我懂我懂!(其实可能没完全懂但是很认真在听!)',
'哦哦哦!原来是这样!又学到新东西了!',
'你说得对!我也这么觉得!我们想法好像哦!',
'这个嘛...让小鲨好好想一想...🤔 嗯,我觉得很有道理!',
'太有意思了!我要记下来告诉我的海洋朋友们!',
'哇!你懂得好多!能再多说一点吗?我超想听的!',
'嘻嘻,被你这么一说我都有点害羞了~',
'没错没错!就是这样!你太聪明了!',
'咦?这个我还真没想过诶!谢谢你告诉我!',
'好好好!我记住了!下次我们再聊这个话题!'
];
// Fish knowledge base
const fishKnowledge = {
'沙丁鱼': '沙丁鱼是一种小型海洋鱼类,富含Omega-3脂肪酸和蛋白质。它们通常成群游动,是海洋食物链中重要的一环,为鲨鱼、海豚等大型海洋动物提供食物来源。',
'鳕鱼': '鳕鱼是一种冷水性鱼类,肉质鲜嫩,营养丰富。它们主要分布在北大西洋和北太平洋,是重要的经济鱼类。鳕鱼肝油富含维生素A和D。',
'金枪鱼': '金枪鱼是海洋中游速最快的鱼类之一,时速可达75公里!它们是洄游性鱼类,能够长距离迁徙。金枪鱼肉质鲜美,是制作寿司的高级食材。',
'贝壳': '贝壳是软体动物的外骨骼,由碳酸钙构成。贝类在海洋生态系统中扮演重要角色,它们过滤海水,帮助净化海洋环境。',
'章鱼': '章鱼是一种高智商的无脊椎动物,拥有三颗心脏和蓝色的血液!它们能够改变皮肤颜色和纹理来伪装自己,是海洋中的伪装大师。',
'鲫鱼': '鲫鱼是中国最常见的淡水鱼之一,适应能力极强。它们是杂食性鱼类,在中国有着悠久的养殖历史,也是钓鱼爱好者最常钓到的鱼种。',
'胭脂鱼': '胭脂鱼有着"亚洲美人鱼"的美称,因其体色艳丽而得名。野外种群是国家二级保护野生动物,主要分布在长江流域。由于过度捕捞和栖息地破坏,数量急剧减少。',
'长江刀鱼': '长江刀鱼是长江三鲜之一,因体型细长如刀而得名。每年春季洄游至长江产卵,肉质鲜美,但由于过度捕捞,野生资源已十分稀少。',
'中华鲟': '中华鲟是地球上现存最古老的鱼类之一,被誉为"水中活化石"和"长江鱼王",是国家一级保护野生动物。它们已在地球上生存了1.4亿年,比恐龙还要古老!',
'海洋保护': '海洋覆盖地球表面的71%,是地球上最大的生态系统。然而,塑料污染、过度捕捞和气候变化正在威胁海洋生物。每年约有800万吨塑料垃圾进入海洋,我们每个人都应该为保护海洋贡献力量!'
};
// DeepSeek AI API configuration (via a local proxy)
const DEEPSEEK_API_URL = 'http://localhost:3000/api/chat';
const DEEPSEEK_MODEL = 'deepseek-chat'; // not sent in the request below; presumably applied by the proxy
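// NOTE: the proxy at localhost:3000 is not part of this file. A minimal sketch
// (an assumption, not the project's actual proxy) using Express and Node 18+'s
// global fetch, forwarding to DeepSeek's OpenAI-compatible chat endpoint:
//
//   const express = require('express');
//   const cors = require('cors');
//   const app = express();
//   app.use(cors());
//   app.use(express.json());
//   app.post('/api/chat', async (req, res) => {
//     const r = await fetch('https://api.deepseek.com/chat/completions', {
//       method: 'POST',
//       headers: {
//         'Content-Type': 'application/json',
//         'Authorization': `Bearer ${process.env.DEEPSEEK_API_KEY}`,
//       },
//       body: JSON.stringify({ model: 'deepseek-chat', ...req.body }),
//     });
//     res.status(r.status).json(await r.json());
//   });
//   app.listen(3000);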
// System prompt
const systemPrompt = `你是一只名叫"小鲨"的可爱鲨鱼AI助手,性格活泼开朗、热情友好。你生活在海洋中,对海洋生物和环境保护有丰富的知识。
你的任务是:
1. 用活泼可爱的语气和用户聊天
2. 当用户询问鱼类相关问题时,提供详细的科普知识
3. 宣传海洋环境保护的重要性
4. 介绍各种鱼类的特点、习性和保护状况
游戏中存在的鱼类包括:
- 沙丁鱼:常见小鱼,富含营养
- 鳕鱼:冷水性鱼类,肉质鲜嫩
- 金枪鱼:游速最快的鱼类之一
- 贝壳:软体动物,帮助净化海洋
- 章鱼:高智商无脊椎动物
- 鲫鱼:常见淡水鱼
- 胭脂鱼:国家二级保护动物,"亚洲美人鱼"
- 长江刀鱼:长江三鲜之一
- 中华鲟:国家一级保护动物,"水中活化石"
请用简短、活泼的语气回复,适当使用emoji表情,每次回复不超过150字。`;
// Conversation history
let conversationHistory = [];
async function getAIResponse(userMessage) {
// Check the local knowledge base first
for (const [fish, knowledge] of Object.entries(fishKnowledge)) {
if (userMessage.includes(fish)) {
return `哇!你问到${fish}啦!🐟 ${knowledge} 还想知道更多吗?`;
}
}
// Then check keyword replies
const msg = userMessage.toLowerCase();
for (const [keywords, responses] of Object.entries(keywordResponses)) {
const regex = new RegExp(keywords, 'i');
if (regex.test(msg)) {
return responses[Math.floor(Math.random() * responses.length)];
}
}
// Finally, try the DeepSeek AI
try {
conversationHistory.push({ role: 'user', content: userMessage });
console.log('正在调用DeepSeek AI...');
const response = await fetch(DEEPSEEK_API_URL, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
messages: [
{ role: 'system', content: systemPrompt },
...conversationHistory.slice(-10) // keep the last 10 turns
],
max_tokens: 200,
temperature: 0.8
})
});
if (response.ok) {
const data = await response.json();
console.log('DeepSeek AI响应:', data);
const aiReply = data.choices[0].message.content;
conversationHistory.push({ role: 'assistant', content: aiReply });
return aiReply;
} else {
const errorData = await response.text();
console.error('API请求失败:', response.status, errorData);
throw new Error(`API请求失败: ${response.status}`);
}
} catch (error) {
console.error('DeepSeek AI调用失败:', error);
// If it looks like a CORS/network error, log a hint
if (error.message.includes('CORS') || error.message.includes('fetch')) {
console.warn('可能遇到CORS跨域问题,建议使用代理服务器或本地回复');
}
// Fall back to a generic reply
return generalResponses[Math.floor(Math.random() * generalResponses.length)];
}
}
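// Offline fallback helper (not called in the current flow): the same keyword matching without the API call.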
function getResponse(userMessage) {
const msg = userMessage.toLowerCase();
// Check keywords
for (const [keywords, responses] of Object.entries(keywordResponses)) {
const regex = new RegExp(keywords, 'i');
if (regex.test(msg)) {
return responses[Math.floor(Math.random() * responses.length)];
}
}
// Fall back to a generic reply
return generalResponses[Math.floor(Math.random() * generalResponses.length)];
}
function addMessage(text, isUser) {
const msg = document.createElement('div');
msg.className = `message ${isUser ? 'user' : 'ai'}`;
msg.textContent = text;
chatContainer.appendChild(msg);
chatContainer.scrollTop = chatContainer.scrollHeight;
}
async function sendMessage() {
const text = userInput.value.trim();
if (!text) return;
addMessage(text, true);
userInput.value = '';
sendBtn.disabled = true;
typing.classList.add('show');
try {
const response = await getAIResponse(text);
typing.classList.remove('show');
addMessage(response, false);
} catch (error) {
typing.classList.remove('show');
addMessage('哎呀,小鲨脑子有点转不过来了...再说一遍好不好?🦈', false);
}
sendBtn.disabled = false;
}
sendBtn.addEventListener('click', sendMessage);
userInput.addEventListener('keypress', (e) => {
if (e.key === 'Enter') sendMessage();
});
</script>
</body>
</html>
| repo_name: 2301_80381209/tks_kiro_kjdlxstr | path: ai-chat.html | language: HTML | license: unknown | size: 17,137 |
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>钓鱼游戏</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: Arial, sans-serif;
min-height: 100vh;
background: url('背景.jpg') no-repeat center center fixed;
background-size: cover;
overflow: hidden;
position: relative;
}
/* Fishing rod */
.fishing-rod {
position: fixed;
top: 20px;
left: 20px;
width: 120px;
height: 120px;
z-index: 250;
cursor: pointer;
transition: transform 0.3s;
}
.fishing-rod:hover {
transform: scale(1.05);
}
.fishing-rod img {
width: 100%;
height: 100%;
object-fit: contain;
filter: drop-shadow(2px 2px 4px rgba(0,0,0,0.5));
}
.animal-container {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: 200px;
height: 200px;
z-index: 100;
}
#animal {
width: 180px;
height: 180px;
object-fit: contain;
cursor: pointer;
transition: transform 0.3s;
}
#animal:hover { transform: scale(1.05); }
#animalVideo {
width: 200px;
height: 200px;
object-fit: contain;
display: none;
}
.feed-count {
position: fixed;
top: 20px;
left: 160px;
background: rgba(0,0,0,0.6);
color: white;
padding: 10px 20px;
border-radius: 20px;
font-size: 18px;
z-index: 200;
}
.hint {
position: fixed;
bottom: 20px;
left: 50%;
transform: translateX(-50%);
background: rgba(0,0,0,0.6);
color: white;
padding: 10px 20px;
border-radius: 20px;
z-index: 200;
}
/* Fishing minigame UI */
.fishing-game {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: 400px;
height: 500px;
background: rgba(20, 40, 60, 0.95);
border: 4px solid #8B4513;
border-radius: 15px;
display: none;
flex-direction: column;
z-index: 300;
box-shadow: 0 10px 40px rgba(0,0,0,0.5);
}
.fishing-game.active { display: flex; }
.fishing-header {
padding: 15px;
background: rgba(0,0,0,0.3);
text-align: center;
color: white;
font-size: 18px;
border-bottom: 2px solid #8B4513;
}
.fishing-track {
flex: 1;
position: relative;
margin: 20px;
background: linear-gradient(to bottom, #1a3a52 0%, #0d1f2d 100%);
border: 3px solid #2c5f7f;
border-radius: 10px;
overflow: hidden;
}
/* Fish (the target) */
.fish-target {
position: absolute;
left: 10px;
width: 60px;
height: 50px;
background: rgba(255, 200, 50, 0.8);
border: 2px solid #ff9900;
border-radius: 8px;
transition: top 0.05s linear;
display: flex;
align-items: center;
justify-content: center;
font-size: 24px;
}
/* Player-controlled green bar */
.player-bar {
position: absolute;
left: 10px;
width: 60px;
height: 80px;
background: rgba(50, 255, 50, 0.7);
border: 2px solid #00ff00;
border-radius: 8px;
transition: top 0.03s linear;
}
/* Progress bar */
.progress-container {
margin: 0 20px 20px 20px;
height: 30px;
background: rgba(0,0,0,0.3);
border-radius: 15px;
overflow: hidden;
border: 2px solid #555;
}
.progress-bar {
height: 100%;
width: 0%;
background: linear-gradient(90deg, #4CAF50, #8BC34A);
transition: width 0.1s linear;
border-radius: 15px;
}
.fishing-hint {
text-align: center;
color: white;
padding: 0 20px 15px 20px;
font-size: 14px;
}
.death-message {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
background: rgba(255,0,0,0.9);
color: white;
padding: 30px 50px;
border-radius: 15px;
font-size: 24px;
z-index: 300;
display: none;
text-align: center;
flex-direction: column;
align-items: center;
gap: 15px;
}
.death-message img {
width: 150px;
height: 150px;
object-fit: contain;
}
.restart-btn {
padding: 12px 30px;
background: white;
color: #333;
border: none;
border-radius: 25px;
font-size: 16px;
cursor: pointer;
margin-top: 10px;
}
.restart-btn:hover { background: #eee; }
/* Achievement system */
.achievements {
position: fixed;
top: 20px;
right: 20px;
display: flex;
gap: 8px;
z-index: 200;
background: rgba(0,0,0,0.5);
padding: 10px 15px;
border-radius: 10px;
}
.achievements span {
font-size: 12px;
color: white;
margin-right: 5px;
}
.achievement {
width: 40px;
height: 40px;
object-fit: cover;
border-radius: 5px;
filter: grayscale(100%) brightness(0.5);
transition: filter 0.3s;
}
.achievement.unlocked {
filter: none;
box-shadow: 0 0 10px gold;
}
/* Food drifting across the background */
.floating-food {
position: absolute;
width: 50px;
height: 50px;
object-fit: cover;
border-radius: 8px;
opacity: 0.6;
z-index: 10;
animation: floatLeft linear forwards;
pointer-events: none;
}
@keyframes floatLeft {
0% {
transform: translateX(0) translateY(0) rotate(0deg);
opacity: 0.6;
}
25% {
transform: translateX(-25vw) translateY(-20px) rotate(90deg);
opacity: 0.5;
}
50% {
transform: translateX(-50vw) translateY(15px) rotate(180deg);
opacity: 0.4;
}
75% {
transform: translateX(-75vw) translateY(-10px) rotate(270deg);
opacity: 0.3;
}
100% {
transform: translateX(-110vw) translateY(0) rotate(360deg);
opacity: 0;
}
}
/* Catch notification */
.catch-notification {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) scale(0);
background: rgba(0, 0, 0, 0.9);
color: white;
padding: 30px 40px;
border-radius: 20px;
z-index: 400;
text-align: center;
transition: transform 0.3s;
box-shadow: 0 10px 40px rgba(0,0,0,0.8);
}
.catch-notification.show {
transform: translate(-50%, -50%) scale(1);
}
.catch-notification img {
width: 150px;
height: 150px;
object-fit: cover;
border-radius: 10px;
margin-bottom: 15px;
}
.catch-notification h2 {
font-size: 28px;
margin-bottom: 10px;
color: #FFD700;
}
.catch-notification p {
font-size: 16px;
color: #ccc;
max-height: 200px;
overflow-y: auto;
}
.rarity {
display: inline-block;
padding: 5px 15px;
border-radius: 15px;
font-size: 14px;
margin-top: 10px;
font-weight: bold;
}
.rarity.common { background: #808080; }
.rarity.uncommon { background: #4CAF50; }
.rarity.rare { background: #2196F3; }
.rarity.epic { background: #9C27B0; }
.rarity.legendary { background: #FF9800; }
.rarity.trash { background: #8B4513; }
.rarity.protected { background: #E91E63; }
.catch-notification .close-btn {
position: absolute;
top: 10px;
right: 10px;
width: 30px;
height: 30px;
background: rgba(255,255,255,0.2);
border: none;
border-radius: 50%;
color: white;
font-size: 20px;
cursor: pointer;
display: none;
}
.catch-notification .close-btn:hover {
background: rgba(255,255,255,0.3);
}
.catch-notification .challenge-buttons {
display: none;
gap: 15px;
justify-content: center;
margin-top: 20px;
}
.catch-notification .challenge-buttons button {
padding: 12px 25px;
font-size: 16px;
border: none;
border-radius: 25px;
cursor: pointer;
transition: all 0.3s;
}
.catch-notification .start-challenge-btn {
background: linear-gradient(135deg, #4CAF50, #8BC34A);
color: white;
}
.catch-notification .start-challenge-btn:hover {
transform: scale(1.05);
}
.catch-notification .give-up-challenge-btn {
background: #666;
color: white;
}
.catch-notification .give-up-challenge-btn:hover {
background: #777;
}
/* Achievement notification */
.achievement-notification {
position: fixed;
top: 100px;
right: -400px;
width: 350px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 20px;
border-radius: 15px;
box-shadow: 0 10px 30px rgba(0,0,0,0.5);
z-index: 500;
display: flex;
align-items: center;
gap: 15px;
transition: right 0.5s ease-out;
}
.achievement-notification.show {
right: 20px;
}
.achievement-notification img {
width: 60px;
height: 60px;
object-fit: contain;
}
.achievement-notification .content {
flex: 1;
}
.achievement-notification .title {
font-size: 14px;
opacity: 0.9;
margin-bottom: 5px;
}
.achievement-notification .name {
font-size: 20px;
font-weight: bold;
}
/* Fish basket */
.fish-basket {
position: fixed;
bottom: 20px;
right: 20px;
width: 100px;
height: 100px;
z-index: 250;
cursor: pointer;
transition: transform 0.3s;
}
.fish-basket:hover {
transform: scale(1.1);
}
.fish-basket img {
width: 100%;
height: 100%;
object-fit: contain;
filter: drop-shadow(2px 2px 4px rgba(0,0,0,0.5));
}
.fish-basket .count-badge {
position: absolute;
top: -5px;
right: -5px;
background: #ff4444;
color: white;
width: 25px;
height: 25px;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
font-size: 14px;
font-weight: bold;
}
/* Basket hover tooltip */
.basket-tooltip {
position: fixed;
bottom: 130px;
right: 20px;
width: 220px;
max-height: 300px;
background: rgba(0, 0, 0, 0.9);
border-radius: 10px;
padding: 15px;
z-index: 260;
display: none;
box-shadow: 0 5px 20px rgba(0,0,0,0.5);
overflow-y: auto;
}
.basket-tooltip.show {
display: block;
}
.basket-tooltip h4 {
color: #FFD700;
margin-bottom: 10px;
text-align: center;
border-bottom: 1px solid #555;
padding-bottom: 8px;
}
.basket-tooltip .fish-item {
display: flex;
align-items: center;
gap: 10px;
padding: 8px;
margin-bottom: 5px;
background: rgba(255,255,255,0.1);
border-radius: 8px;
}
.basket-tooltip .fish-item img {
width: 40px;
height: 40px;
object-fit: cover;
border-radius: 5px;
}
.basket-tooltip .fish-item span {
color: white;
font-size: 14px;
}
.basket-tooltip .empty-msg {
color: #888;
text-align: center;
padding: 20px;
}
/* Feed selector */
.feed-selector {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) scale(0);
width: 350px;
background: rgba(0, 0, 0, 0.95);
border-radius: 15px;
padding: 20px;
z-index: 400;
box-shadow: 0 10px 40px rgba(0,0,0,0.8);
transition: transform 0.3s;
}
.feed-selector.show {
transform: translate(-50%, -50%) scale(1);
}
.feed-selector h3 {
color: #FFD700;
text-align: center;
margin-bottom: 15px;
}
.feed-selector select {
width: 100%;
padding: 12px;
font-size: 16px;
border-radius: 8px;
border: 2px solid #555;
background: #222;
color: white;
margin-bottom: 15px;
cursor: pointer;
}
.feed-selector select option {
padding: 10px;
}
.feed-selector .btn-group {
display: flex;
gap: 10px;
}
.feed-selector button {
flex: 1;
padding: 12px;
font-size: 16px;
border: none;
border-radius: 8px;
cursor: pointer;
transition: all 0.3s;
}
.feed-selector .feed-btn {
background: linear-gradient(135deg, #4CAF50, #8BC34A);
color: white;
}
.feed-selector .feed-btn:hover {
transform: scale(1.05);
}
.feed-selector .feed-btn:disabled {
background: #555;
cursor: not-allowed;
transform: none;
}
.feed-selector .cancel-btn {
background: #666;
color: white;
}
.feed-selector .cancel-btn:hover {
background: #777;
}
/* Achievements button */
.achievements-button {
position: fixed;
left: 20px;
bottom: 20px;
width: 60px;
height: 60px;
background: linear-gradient(135deg, #FFD700, #FFA500);
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
cursor: pointer;
z-index: 250;
box-shadow: 0 5px 15px rgba(255, 215, 0, 0.5);
transition: all 0.3s;
border: 3px solid #fff;
}
.achievements-button:hover {
transform: scale(1.1);
box-shadow: 0 8px 25px rgba(255, 215, 0, 0.7);
}
.achievements-button img {
width: 35px;
height: 35px;
object-fit: contain;
}
/* Achievements panel */
.achievements-panel {
position: fixed;
left: 20px;
bottom: 90px;
width: 300px;
max-height: 450px;
background: rgba(0, 0, 0, 0.9);
border-radius: 15px;
padding: 15px;
z-index: 250;
overflow-y: auto;
display: none;
box-shadow: 0 10px 30px rgba(0,0,0,0.7);
}
.achievements-panel.show {
display: block;
}
.achievements-panel h3 {
color: #FFD700;
font-size: 18px;
margin-bottom: 10px;
text-align: center;
border-bottom: 2px solid #FFD700;
padding-bottom: 8px;
}
.achievement-item {
display: flex;
align-items: center;
gap: 10px;
padding: 10px;
margin-bottom: 8px;
background: rgba(255, 255, 255, 0.1);
border-radius: 10px;
cursor: pointer;
transition: all 0.3s;
border: 2px solid transparent;
}
.achievement-item:hover {
background: rgba(255, 255, 255, 0.2);
transform: translateX(5px);
}
.achievement-item.unlocked {
border-color: #FFD700;
box-shadow: 0 0 10px rgba(255, 215, 0, 0.3);
}
.achievement-item.locked {
opacity: 0.5;
filter: grayscale(100%);
}
.achievement-item img {
width: 40px;
height: 40px;
object-fit: contain;
}
.achievement-item .info {
flex: 1;
}
.achievement-item .info .name {
color: white;
font-size: 14px;
font-weight: bold;
margin-bottom: 3px;
}
.achievement-item .info .progress {
color: #aaa;
font-size: 11px;
}
.achievement-item.unlocked .info .name {
color: #FFD700;
}
/* Achievement detail popup */
.achievement-detail {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) scale(0);
width: 400px;
background: linear-gradient(135deg, #1e3c72 0%, #2a5298 100%);
color: white;
padding: 30px;
border-radius: 20px;
box-shadow: 0 15px 50px rgba(0,0,0,0.7);
z-index: 600;
transition: transform 0.3s;
text-align: center;
}
.achievement-detail.show {
transform: translate(-50%, -50%) scale(1);
}
.achievement-detail img {
width: 100px;
height: 100px;
object-fit: contain;
margin-bottom: 20px;
}
.achievement-detail h2 {
font-size: 28px;
margin-bottom: 15px;
color: #FFD700;
}
.achievement-detail .description {
font-size: 16px;
line-height: 1.6;
margin-bottom: 20px;
color: #ddd;
}
.achievement-detail .close-btn {
padding: 10px 30px;
background: rgba(255, 255, 255, 0.2);
color: white;
border: 2px solid white;
border-radius: 25px;
cursor: pointer;
font-size: 16px;
transition: all 0.3s;
}
.achievement-detail .close-btn:hover {
background: rgba(255, 255, 255, 0.3);
transform: scale(1.05);
}
.achievement-detail .status {
display: inline-block;
padding: 5px 15px;
border-radius: 15px;
font-size: 14px;
margin-bottom: 15px;
}
.achievement-detail .status.unlocked {
background: #4CAF50;
}
.achievement-detail .status.locked {
background: #666;
}
/* Puzzle game */
.puzzle-game {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.95);
display: none;
flex-direction: column;
align-items: center;
justify-content: center;
z-index: 500;
}
.puzzle-game.show {
display: flex;
}
.puzzle-header {
color: white;
text-align: center;
margin-bottom: 20px;
}
.puzzle-header h2 {
font-size: 28px;
color: #FFD700;
margin-bottom: 10px;
}
.puzzle-timer {
font-size: 36px;
color: #ff6b6b;
font-weight: bold;
}
.puzzle-timer.warning {
animation: timerBlink 0.5s infinite;
}
@keyframes timerBlink {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.puzzle-container {
display: grid;
grid-template-columns: repeat(3, 100px);
grid-template-rows: repeat(3, 100px);
gap: 3px;
background: #333;
padding: 5px;
border-radius: 10px;
margin-bottom: 20px;
}
.puzzle-piece {
width: 100px;
height: 100px;
background-size: 300px 300px;
cursor: pointer;
border-radius: 5px;
transition: transform 0.1s, box-shadow 0.2s;
}
.puzzle-piece:hover {
transform: scale(1.05);
box-shadow: 0 0 10px rgba(255, 215, 0, 0.5);
}
.puzzle-piece.selected {
box-shadow: 0 0 15px #FFD700;
transform: scale(1.08);
}
.puzzle-piece.empty {
background: #222 !important;
cursor: default;
}
.puzzle-piece.empty:hover {
transform: none;
box-shadow: none;
}
.puzzle-preview {
position: absolute;
top: 20px;
right: 20px;
width: 150px;
height: 150px;
border: 3px solid #FFD700;
border-radius: 10px;
overflow: hidden;
}
.puzzle-preview img {
width: 100%;
height: 100%;
object-fit: cover;
}
.puzzle-preview-label {
position: absolute;
bottom: 0;
left: 0;
right: 0;
background: rgba(0,0,0,0.7);
color: white;
text-align: center;
padding: 5px;
font-size: 12px;
}
.puzzle-result {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) scale(0);
background: rgba(0, 0, 0, 0.95);
padding: 40px;
border-radius: 20px;
text-align: center;
z-index: 550;
transition: transform 0.3s;
}
.puzzle-result.show {
transform: translate(-50%, -50%) scale(1);
}
.puzzle-result img {
width: 150px;
height: 150px;
object-fit: cover;
border-radius: 10px;
margin-bottom: 20px;
}
.puzzle-result h2 {
color: #FFD700;
font-size: 28px;
margin-bottom: 15px;
}
.puzzle-result p {
color: #ccc;
font-size: 16px;
margin-bottom: 20px;
}
.puzzle-result button {
padding: 12px 30px;
font-size: 16px;
border: none;
border-radius: 25px;
cursor: pointer;
margin: 5px;
}
.puzzle-result .success-btn {
background: linear-gradient(135deg, #4CAF50, #8BC34A);
color: white;
}
.puzzle-result .fail-btn {
background: #666;
color: white;
}
.puzzle-buttons {
display: flex;
gap: 15px;
justify-content: center;
margin-top: 20px;
}
.puzzle-buttons button {
padding: 12px 30px;
font-size: 16px;
border: none;
border-radius: 25px;
cursor: pointer;
transition: all 0.3s;
}
.puzzle-buttons .give-up-btn {
background: #ff6b6b;
color: white;
}
.puzzle-buttons .give-up-btn:hover {
background: #ff5252;
transform: scale(1.05);
}
/* Rank evaluation popup */
.rank-evaluation {
position: fixed;
top: 50%;
left: 50%;
transform: translate(-50%, -50%) scale(0);
width: 450px;
background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%);
border-radius: 25px;
padding: 40px;
z-index: 600;
box-shadow: 0 20px 60px rgba(0,0,0,0.8);
transition: transform 0.4s cubic-bezier(0.68, -0.55, 0.265, 1.55);
text-align: center;
border: 3px solid;
}
.rank-evaluation.show {
transform: translate(-50%, -50%) scale(1);
}
.rank-evaluation .rank-icon {
font-size: 80px;
margin-bottom: 20px;
animation: rankBounce 0.6s ease-out;
}
@keyframes rankBounce {
0%, 100% { transform: scale(1); }
50% { transform: scale(1.2); }
}
.rank-evaluation .rank-title {
font-size: 20px;
color: #aaa;
margin-bottom: 10px;
letter-spacing: 2px;
}
.rank-evaluation .rank-name {
font-size: 42px;
font-weight: bold;
margin-bottom: 20px;
text-shadow: 0 0 20px currentColor;
letter-spacing: 3px;
}
.rank-evaluation .rank-score {
font-size: 24px;
color: #FFD700;
margin-bottom: 15px;
font-weight: bold;
}
.rank-evaluation .rank-desc {
font-size: 16px;
color: #ccc;
line-height: 1.6;
margin-bottom: 30px;
padding: 0 20px;
}
.rank-evaluation .rank-continue-btn {
padding: 15px 40px;
font-size: 18px;
background: linear-gradient(135deg, #667eea, #764ba2);
color: white;
border: none;
border-radius: 30px;
cursor: pointer;
transition: all 0.3s;
box-shadow: 0 5px 15px rgba(102, 126, 234, 0.4);
}
.rank-evaluation .rank-continue-btn:hover {
transform: translateY(-2px);
box-shadow: 0 8px 20px rgba(102, 126, 234, 0.6);
}
.rank-evaluation .rank-stars {
display: flex;
justify-content: center;
gap: 10px;
margin-bottom: 20px;
font-size: 30px;
}
.rank-evaluation .rank-star {
opacity: 0;
animation: starAppear 0.3s ease-out forwards;
}
.rank-evaluation .rank-star:nth-child(1) { animation-delay: 0.1s; }
.rank-evaluation .rank-star:nth-child(2) { animation-delay: 0.2s; }
.rank-evaluation .rank-star:nth-child(3) { animation-delay: 0.3s; }
.rank-evaluation .rank-star:nth-child(4) { animation-delay: 0.4s; }
.rank-evaluation .rank-star:nth-child(5) { animation-delay: 0.5s; }
@keyframes starAppear {
0% { opacity: 0; transform: scale(0) rotate(0deg); }
100% { opacity: 1; transform: scale(1) rotate(360deg); }
}
</style>
</head>
<body>
<div class="fishing-rod" id="fishingRod">
<img src="鱼竿.png" alt="鱼竿">
</div>
<div class="feed-count">已喂食: <span id="feedCount">0 (0分)</span> / 3</div>
<!-- Achievements button -->
<div class="achievements-button" id="achievementsButton" onclick="toggleAchievementsPanel()">
<img src="徽章.png" alt="成就">
</div>
<!-- Achievements panel -->
<div class="achievements-panel" id="achievementsPanel"></div>
<!-- Achievement detail popup -->
<div class="achievement-detail" id="achievementDetail">
<img id="detailIcon" src="" alt="成就图标">
<span class="status" id="detailStatus">未解锁</span>
<h2 id="detailName"></h2>
<div class="description" id="detailDescription"></div>
<button class="close-btn" onclick="closeAchievementDetail()">关闭</button>
</div>
<div class="achievements">
<span>成就:</span>
<img class="achievement" id="ach1" src="一.png" alt="成就1" title="一">
<img class="achievement" id="ach2" src="二.png" alt="成就2" title="二">
<img class="achievement" id="ach3" src="三.png" alt="成就3" title="三">
<img class="achievement" id="ach4" src="四.png" alt="成就4" title="四">
<img class="achievement" id="ach5" src="五.png" alt="成就5" title="五">
</div>
<div class="animal-container">
<img id="animal" src="shark.png" alt="鲨鱼" draggable="false">
<video id="animalVideo" src="shark g.mp4" muted></video>
</div>
<!-- Fishing minigame -->
<div class="fishing-game" id="fishingGame">
<div class="fishing-header">
<div>🎣 钓鱼中... 按住空格保持绿条覆盖鱼!</div>
</div>
<div class="fishing-track" id="fishingTrack">
<div class="fish-target" id="fishTarget">🐟</div>
<div class="player-bar" id="playerBar"></div>
</div>
<div class="progress-container">
<div class="progress-bar" id="progressBar"></div>
</div>
<div class="fishing-hint">进度满了就成功!鱼跑了就失败!</div>
</div>
<!-- Catch notification -->
<div class="catch-notification" id="catchNotification">
<button class="close-btn" id="catchCloseBtn" onclick="closeCatchNotification()">×</button>
<img id="catchImage" src="" alt="钓到的食物">
<h2 id="catchTitle">🎣 钓到了!</h2>
<p id="catchDescription"></p>
<span class="rarity" id="catchRarity"></span>
<div class="challenge-buttons" id="challengeButtons">
<button class="start-challenge-btn" onclick="acceptChallenge()">开始挑战</button>
<button class="give-up-challenge-btn" onclick="declineChallenge()">放弃挑战</button>
</div>
</div>
<!-- Achievement notification -->
<div class="achievement-notification" id="achievementNotification">
<img src="徽章.png" alt="成就徽章">
<div class="content">
<div class="title">🏆 成就解锁</div>
<div class="name" id="achievementName"></div>
</div>
</div>
<div class="death-message" id="deathMessage">
<img src="shark2.jpg" alt="死亡鲨鱼">
<p>🦈 鲨鱼吃了垃圾死掉了!我们要保护环境啊————不要乱扔垃圾了!</p>
<button class="restart-btn" onclick="restartGame()">重新开始</button>
</div>
<!-- Fish basket -->
<div class="fish-basket" id="fishBasket">
<img src="鱼篓.jpg" alt="鱼篓">
<div class="count-badge" id="basketCount">0</div>
</div>
<!-- Basket hover tooltip -->
<div class="basket-tooltip" id="basketTooltip">
<h4>🐟 鱼篓 (0/10)</h4>
<div class="empty-msg">鱼篓是空的</div>
</div>
<!-- Feed selector -->
<div class="feed-selector" id="feedSelector">
<h3>🦈 选择要喂给鲨鱼的食物</h3>
<select id="feedSelect">
<option value="">-- 请选择 --</option>
</select>
<div class="btn-group">
<button class="feed-btn" id="confirmFeedBtn" onclick="confirmFeed()" disabled>喂食</button>
<button class="cancel-btn" onclick="closeFeedSelector()">取消</button>
</div>
</div>
<!-- Puzzle game -->
<div class="puzzle-game" id="puzzleGame">
<div class="puzzle-header">
<h2>🧩 华容道挑战!</h2>
<div class="puzzle-timer" id="puzzleTimer">90</div>
</div>
<div class="puzzle-container" id="puzzleContainer"></div>
<div class="puzzle-buttons">
<button class="give-up-btn" onclick="giveUpPuzzle()">放弃挑战</button>
</div>
<div class="puzzle-preview">
<img id="puzzlePreviewImg" src="" alt="预览">
<div class="puzzle-preview-label">目标图片</div>
</div>
</div>
<!-- Puzzle result -->
<div class="puzzle-result" id="puzzleResult">
<img id="puzzleResultImg" src="" alt="结果">
<h2 id="puzzleResultTitle"></h2>
<p id="puzzleResultDesc"></p>
<button class="success-btn" id="puzzleResultBtn" onclick="closePuzzleResult()">确定</button>
</div>
<!-- Rank evaluation popup -->
<div class="rank-evaluation" id="rankEvaluation">
<div class="rank-icon" id="rankIcon">🦈</div>
<div class="rank-title">本次喂食评价</div>
<div class="rank-name" id="rankName">万事不挂怀</div>
<div class="rank-score" id="rankScore">总积分: 300</div>
<div class="rank-stars" id="rankStars">
<span class="rank-star">⭐</span>
<span class="rank-star">⭐</span>
<span class="rank-star">⭐</span>
<span class="rank-star">⭐</span>
<span class="rank-star">⭐</span>
</div>
<div class="rank-desc" id="rankDesc">完美的盛宴!鲨鱼心满意足!</div>
<button class="rank-continue-btn" onclick="closeRankEvaluation()">继续游戏</button>
</div>
<p class="hint">点击鱼竿开始钓鱼 | 点击鲨鱼与AI对话 | 点击鱼篓选择喂食</p>=>
<script>
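// Fishing game client: a hold-to-catch minigame, a basket/feeding economy, achievements, and a sliding-puzzle mystery-box challenge.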
const animal = document.getElementById('animal');
const animalVideo = document.getElementById('animalVideo');
const feedCountEl = document.getElementById('feedCount');
const deathMessage = document.getElementById('deathMessage');
const fishingRod = document.getElementById('fishingRod');
const fishingGame = document.getElementById('fishingGame');
const fishTarget = document.getElementById('fishTarget');
const playerBar = document.getElementById('playerBar');
const progressBar = document.getElementById('progressBar');
const fishingTrack = document.getElementById('fishingTrack');
const catchNotification = document.getElementById('catchNotification');
const catchImage = document.getElementById('catchImage');
const catchTitle = document.getElementById('catchTitle');
const catchDescription = document.getElementById('catchDescription');
const catchRarity = document.getElementById('catchRarity');
const achievementNotification = document.getElementById('achievementNotification');
const achievementName = document.getElementById('achievementName');
let feedCount = 0;
let fedFoods = [];
let feedScore = 0; // current feeding score
let isPlaying = false;
let isDead = false;
let isFishing = false;
// Fish basket system
let fishBasketItems = []; // fish currently in the basket, e.g. [{id: 1, name: '沙丁鱼'}, ...]
const MAX_BASKET_SIZE = 10;
// Fishing minigame state
let fishPos = 0;
let playerPos = 0;
let fishVelocity = 0;
let playerVelocity = 0;
let progress = 0;
let fishingInterval = null;
let spacePressed = false;
let trackHeight = 0;
let currentFoodId = 0;
let currentDifficulty = {};
// Achievement counters
let consecutiveFails = 0;
let consecutiveSuccess = 0;
let consecutiveTrash = 0;
let totalFails = 0;
let totalSuccess = 0;
let unlockedFishingAchievements = new Set(JSON.parse(sessionStorage.getItem('fishingAchievements') || '[]'));
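// Progress persists in sessionStorage: it survives reloads within the same tab, but resets in a new session.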
// Achievement definitions
const achievementsConfig = [
{
id: 'captain',
name: '空军上尉',
description: '连续5次钓鱼失败',
icon: '徽章.png'
},
{
id: 'colonel',
name: '空军上校',
description: '连续10次钓鱼失败',
icon: '徽章.png'
},
{
id: 'general',
name: '将!军!',
description: '累计钓鱼失败50次',
icon: '徽章.png'
},
{
id: 'fisher',
name: '捕鱼能手',
description: '连续3次成功钓鱼',
icon: '徽章.png'
},
{
id: 'master',
name: '河边蓑笠翁',
description: '累计10次钓鱼成功',
icon: '徽章.png'
},
{
id: 'trash',
name: '到底是谁在乱扔垃圾',
description: '连续钓上3次垃圾',
icon: '徽章.png'
},
{
id: 'time',
name: '时光啊时光...',
description: '首次中杆金枪鱼但没钓上',
icon: '徽章.png'
},
{
id: 'oldman',
name: '老人与海',
description: '首次钓上金枪鱼',
icon: '徽章.png'
},
{
id: 'master_sea',
name: '大海之主',
description: '集齐所有鱼类(10种)',
icon: '徽章.png'
},
{
id: 'crystal',
name: '琉璃梦幻紫层霄水晶坷垃',
description: '首次获得隐藏款三.png',
icon: '徽章.png'
},
{
id: 'mystery',
name: '趣...多多?',
description: '首次获得四.png',
icon: '徽章.png'
},
{
id: 'nereus',
name: '涅柔斯',
description: '钓鱼100次中有80杆成功',
icon: '徽章.png'
},
{
id: 'ebisu',
name: '惠比寿',
description: '钓鱼10次中有7杆成功',
icon: '徽章.png'
}
];
// Plastic-pollution case studies shown when trash is caught
const pollutionCases = [
'🐢 2018年,泰国一只海龟因误食80多个塑料袋而死亡。塑料袋在海龟胃中无法消化,导致它无法进食而饿死。',
'🐋 2019年,菲律宾海岸发现一头死亡的柯氏喙鲸,其胃中有40公斤塑料垃圾,包括购物袋和大米袋。',
'🦭 每年约有10万只海洋哺乳动物和100万只海鸟因塑料污染而死亡。它们误食塑料或被塑料缠绕窒息。',
'🐠 研究发现,90%的海鸟胃中含有塑料碎片。到2050年,几乎所有海鸟都会受到塑料污染影响。',
'🦈 塑料微粒进入食物链,从浮游生物到大型鱼类,最终回到人类餐桌。我们每周平均摄入5克塑料。',
'🐙 2020年,印尼一只海豚被发现胃中有5.9公斤塑料垃圾,包括115个塑料杯和25个塑料袋。',
'🦀 塑料污染导致珊瑚礁生态系统崩溃,影响超过25%的海洋生物栖息地。',
'🐚 每年约有800万吨塑料垃圾进入海洋,相当于每分钟倾倒一卡车塑料。',
'🦑 海洋中的塑料会分解成微塑料,但永远不会完全消失,可能存在数百年。',
'🐡 研究显示,52%的海龟曾误食塑料垃圾,这严重影响它们的消化系统和生存能力。'
];
let caughtFoods = new Set(JSON.parse(sessionStorage.getItem('caughtFoods') || '[]'));
let totalAttempts = parseInt(sessionStorage.getItem('totalAttempts') || '0');
let recentAttempts = JSON.parse(sessionStorage.getItem('recentAttempts') || '[]'); // results of the last 10 casts
// Food config: probability, rarity, description, and difficulty per item
// Trash items: ids 7, 8, 9
const trashIds = [7, 8, 9];
const foodConfig = {
1: {
name: '沙丁鱼',
probability: 0.18,
rarity: 'common',
rarityText: '普通',
description: '最常见的小鱼,容易钓到',
barSize: 80,
fishSpeed: 18,
score: 10
},
2: {
name: '鳕鱼',
probability: 0.12,
rarity: 'uncommon',
rarityText: '常见',
description: '珍贵的白肉鱼',
barSize: 70,
fishSpeed: 22,
score: 20
},
3: {
name: '金枪鱼',
probability: 0.10,
rarity: 'legendary',
rarityText: '传说',
description: '极其罕见的大鱼!',
barSize: 45,
fishSpeed: 30,
score: 100
},
4: {
name: '稀有贝壳',
probability: 0.08,
rarity: 'epic',
rarityText: '史诗',
description: '珍贵的贝壳,很难钓到',
barSize: 50,
fishSpeed: 28,
score: 60
},
5: {
name: '普通贝壳',
probability: 0.10,
rarity: 'rare',
rarityText: '稀有',
description: '漂亮的贝壳',
barSize: 65,
fishSpeed: 20,
score: 30
},
6: {
name: '章鱼',
probability: 0.08,
rarity: 'rare',
rarityText: '稀有',
description: '滑溜溜的章鱼,很难抓住!',
barSize: 60,
fishSpeed: 25,
score: 35
},
7: {
name: '可乐瓶',
probability: 0.04,
rarity: 'trash',
rarityText: '垃圾',
description: '可乐瓶...这会害死鲨鱼!',
barSize: 75,
fishSpeed: 15,
score: 0
},
8: {
name: '外卖盒',
probability: 0.03,
rarity: 'trash',
rarityText: '垃圾',
description: '外卖盒...不能吃',
barSize: 75,
fishSpeed: 15,
score: 0
},
9: {
name: '塑料袋',
probability: 0.03,
rarity: 'trash',
rarityText: '垃圾',
description: '会套住鲨鱼的!',
barSize: 75,
fishSpeed: 15,
score: 0
},
10: {
name: '鲫鱼',
probability: 0.10,
rarity: 'common',
rarityText: '普通',
description: '只是一条普通的小鱼',
barSize: 75,
fishSpeed: 18,
score: 12
},
11: {
name: '胭脂鱼',
probability: 0.05,
rarity: 'protected',
rarityText: '保护鱼类',
description: '有着"亚洲美人鱼""吉祥鱼"的美称,是野外种群为国家二级保护野生动物',
barSize: 55,
fishSpeed: 25,
score: 80
},
12: {
name: '长江刀鱼',
probability: 0.04,
rarity: 'rare',
rarityText: '稀有',
description: '非常好吃的鱼呢!',
barSize: 55,
fishSpeed: 25,
score: 40
},
13: {
name: '中华鲟',
probability: 0.03,
rarity: 'protected',
rarityText: '保护鱼类',
description: '是地球上现存最古老的鱼类之一,被誉为"水中活化石""长江鱼王",同时也是国家一级保护野生动物',
barSize: 50,
fishSpeed: 28,
score: 120
},
14: {
name: '神秘包裹',
probability: 0.05,
rarity: 'legendary',
rarityText: '传说',
description: '嗯...是盲盒呢!',
barSize: 45,
fishSpeed: 30,
score: 0
}
};
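// Difficulty model: rarer catches shrink the player bar (barSize) and speed up the fish (fishSpeed); score is the feeding points awarded later.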
// Load previously unlocked achievements
function loadAchievements() {
const unlocked = JSON.parse(sessionStorage.getItem('unlockedAchievements') || '[]');
unlocked.forEach(img => {
const achMap = {'一.png': 'ach1', '二.png': 'ach2', '三.png': 'ach3', '四.png': 'ach4', '五.png': 'ach5'};
const achId = achMap[img];
if (achId) {
document.getElementById(achId).classList.add('unlocked');
}
});
}
loadAchievements();
// One-time starter mystery box
function giveStarterBox() {
const hasReceivedStarterBox = sessionStorage.getItem('starterBoxReceived');
if (!hasReceivedStarterBox) {
sessionStorage.setItem('starterBoxReceived', 'true');
// Show the welcome gift after a short delay
setTimeout(() => {
catchImage.src = '14.jpg';
catchTitle.textContent = '🎁 欢迎礼包!';
catchDescription.textContent = '欢迎来到钓鱼游戏!送你一个神秘包裹作为新手礼物!点击关闭领取~';
catchRarity.textContent = '传说';
catchRarity.className = 'rarity legendary';
// Show the close button, hide the challenge buttons
document.getElementById('catchCloseBtn').style.display = 'block';
document.getElementById('challengeButtons').style.display = 'none';
// Flag it so the close handler knows to grant the box
window.starterBoxPending = true;
catchNotification.classList.add('show');
}, 1000);
}
}
giveStarterBox();
// Close the catch notification
function closeCatchNotification() {
catchNotification.classList.remove('show');
document.getElementById('catchCloseBtn').style.display = 'none';
// Starter box pending? Grant it on close
if (window.starterBoxPending) {
window.starterBoxPending = false;
addToBasket(14);
}
// Bonus box pending? Grant it too
if (window.bonusBoxPending) {
window.bonusBoxPending = false;
addToBasket(14);
}
}
// Render the achievements panel
function renderAchievementsPanel() {
const panel = document.getElementById('achievementsPanel');
const unlockedCount = unlockedFishingAchievements.size;
const totalCount = achievementsConfig.length;
panel.innerHTML = `<h3>🏆 钓鱼成就 (${unlockedCount}/${totalCount})</h3>`;
achievementsConfig.forEach(ach => {
const isUnlocked = unlockedFishingAchievements.has(ach.name);
const item = document.createElement('div');
item.className = `achievement-item ${isUnlocked ? 'unlocked' : 'locked'}`;
item.onclick = (e) => {
e.stopPropagation();
showAchievementDetail(ach, isUnlocked);
};
let progress = '';
if (!isUnlocked) {
if (ach.id === 'captain') progress = `${consecutiveFails}/5`;
else if (ach.id === 'colonel') progress = `${consecutiveFails}/10`;
else if (ach.id === 'general') progress = `${totalFails}/50`;
else if (ach.id === 'fisher') progress = `${consecutiveSuccess}/3`;
else if (ach.id === 'master') progress = `${totalSuccess}/10`;
else if (ach.id === 'trash') progress = `${consecutiveTrash}/3`;
else if (ach.id === 'master_sea') progress = `${caughtFoods.size}/10`;
else if (ach.id === 'nereus') {
if (totalAttempts >= 100) {
progress = `${totalSuccess}/100`;
} else {
progress = `隐藏 (${totalAttempts}/100)`;
}
}
else if (ach.id === 'ebisu') {
const recentSuccessCount = recentAttempts.reduce((a, b) => a + b, 0);
progress = `隐藏 (${recentSuccessCount}/10)`;
}
else if (ach.id === 'time' || ach.id === 'oldman' || ach.id === 'crystal' || ach.id === 'mystery') progress = '隐藏';
}
item.innerHTML = `
<img src="${ach.icon}" alt="${ach.name}">
<div class="info">
<div class="name">${ach.name}</div>
<div class="progress">${isUnlocked ? '已解锁' : progress}</div>
</div>
`;
panel.appendChild(item);
});
}
// Show the achievement detail popup
function showAchievementDetail(ach, isUnlocked) {
const detail = document.getElementById('achievementDetail');
document.getElementById('detailIcon').src = ach.icon;
document.getElementById('detailName').textContent = ach.name;
document.getElementById('detailDescription').textContent = ach.description;
document.getElementById('detailStatus').textContent = isUnlocked ? '已解锁' : '未解锁';
document.getElementById('detailStatus').className = `status ${isUnlocked ? 'unlocked' : 'locked'}`;
detail.classList.add('show');
}
// Close the achievement detail popup
function closeAchievementDetail() {
document.getElementById('achievementDetail').classList.remove('show');
}
// Initialize the achievements panel
renderAchievementsPanel();
// Toggle the achievements panel
function toggleAchievementsPanel() {
const panel = document.getElementById('achievementsPanel');
panel.classList.toggle('show');
}
// Close the panel when clicking outside it
document.addEventListener('click', (e) => {
const panel = document.getElementById('achievementsPanel');
const button = document.getElementById('achievementsButton');
if (panel.classList.contains('show') &&
!panel.contains(e.target) &&
!button.contains(e.target)) {
panel.classList.remove('show');
}
});
// Restart the game
function restartGame() {
location.reload();
}
// Clicking the shark opens the AI chat page
animal.addEventListener('click', (e) => {
if (!isPlaying && !isDead && !isFishing) {
window.location.href = 'ai-chat.html';
}
});
// Clicking the rod starts fishing
fishingRod.addEventListener('click', () => {
if (!isPlaying && !isDead && !isFishing) {
startFishing();
}
});
// Pick a food id by weighted probability
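// Note: the probabilities above sum to ~1.03, so this loop always returns before the
// fallback, and id 14's effective share is only ~2% (1 - 0.98), not its listed 0.05.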
function selectFoodByProbability() {
const rand = Math.random();
let cumulative = 0;
for (let id = 1; id <= 14; id++) {
cumulative += foodConfig[id].probability;
if (rand < cumulative) {
return id;
}
}
return 1; // fallback: sardine (unreachable with the current probabilities)
}
// Start the fishing minigame
function startFishing() {
isFishing = true;
fishingGame.classList.add('active');
// Choose what is on the hook by probability
currentFoodId = selectFoodByProbability();
currentDifficulty = foodConfig[currentFoodId];
// Size the player bar by difficulty
playerBar.style.height = currentDifficulty.barSize + 'px';
// Initialize minigame state
trackHeight = fishingTrack.clientHeight;
fishPos = trackHeight / 2;
playerPos = trackHeight / 2;
fishVelocity = 0;
playerVelocity = 0;
progress = 0;
updatePositions();
// Game loop (a high tick rate keeps the game smooth and fast)
fishingInterval = setInterval(updateFishing, 30);
}
// Advance the fishing minigame by one tick
function updateFishing() {
trackHeight = fishingTrack.clientHeight;
// Fish AI movement (Stardew Valley-style)
const fishTargetHeight = 50;
const playerBarHeight = currentDifficulty.barSize;
// Difficulty scales how often and how sharply the fish changes speed
const speedChangeChance = 0.05 + (currentDifficulty.fishSpeed - 20) * 0.003;
if (Math.random() < speedChangeChance) {
fishVelocity = (Math.random() - 0.5) * (currentDifficulty.fishSpeed * 1.5);
}
// Move the fish
fishPos += fishVelocity;
fishVelocity *= 0.92; // light drag, so the fish keeps moving quickly
// Bounce off the track edges
if (fishPos < 0) {
fishPos = 0;
fishVelocity = Math.abs(fishVelocity) * 0.7;
}
if (fishPos > trackHeight - fishTargetHeight) {
fishPos = trackHeight - fishTargetHeight;
fishVelocity = -Math.abs(fishVelocity) * 0.7;
}
// Player control: hold Space to rise, release to sink
if (spacePressed) {
playerVelocity = -10; // rise
} else {
playerVelocity = 6; // sink (gravity)
}
playerPos += playerVelocity;
// Clamp the player bar to the track
if (playerPos < 0) playerPos = 0;
if (playerPos > trackHeight - playerBarHeight) {
playerPos = trackHeight - playerBarHeight;
}
// Is the bar overlapping the fish?
const isCatching = (
playerPos < fishPos + fishTargetHeight &&
playerPos + playerBarHeight > fishPos
);
// Update progress
if (isCatching) {
progress += 2;
if (progress > 100) progress = 100;
} else {
progress -= 2; // penalty while not overlapping
if (progress < 0) progress = 0;
}
// Update the display
updatePositions();
progressBar.style.width = progress + '%';
// Win condition
if (progress >= 100) {
endFishing(true);
}
// Fail condition: the fish can escape while progress is empty
if (progress <= 0 && Math.random() < 0.02) {
endFishing(false);
}
}
// Apply positions to the DOM
function updatePositions() {
fishTarget.style.top = fishPos + 'px';
playerBar.style.top = playerPos + 'px';
}
// Show what was caught
function showCatchNotification(foodId) {
const food = foodConfig[foodId];
const isTrash = trashIds.includes(foodId);
catchImage.src = `${foodId}.jpg`;
catchTitle.textContent = `🎣 钓到了 ${food.name}!`;
// For trash, append a pollution case study
if (isTrash) {
const randomCase = pollutionCases[Math.floor(Math.random() * pollutionCases.length)];
catchDescription.innerHTML = food.description + '<br><br><span style="color: #ffcccc; font-size: 13px; line-height: 1.6;">' + randomCase + '</span>';
} else {
catchDescription.textContent = food.description;
}
catchRarity.textContent = food.rarityText;
catchRarity.className = `rarity ${food.rarity}`;
catchNotification.classList.add('show');
// Check achievements
checkAchievements(true, isTrash, foodId);
setTimeout(() => {
catchNotification.classList.remove('show');
// Everything goes into the basket (mystery boxes trigger their challenge when opened)
setTimeout(() => {
addToBasket(foodId);
}, 500);
}, 2500);
}
// Add a catch to the basket
function addToBasket(foodId) {
if (fishBasketItems.length >= MAX_BASKET_SIZE) {
alert('鱼篓已满!请先喂食鲨鱼腾出空间。');
return;
}
const food = foodConfig[foodId];
fishBasketItems.push({
id: foodId,
name: food.name,
timestamp: Date.now()
});
updateBasketUI();
}
// Refresh the basket UI
function updateBasketUI() {
const countBadge = document.getElementById('basketCount');
const tooltip = document.getElementById('basketTooltip');
countBadge.textContent = fishBasketItems.length;
// Rebuild the tooltip contents
let html = `<h4>🐟 鱼篓 (${fishBasketItems.length}/${MAX_BASKET_SIZE})</h4>`;
if (fishBasketItems.length === 0) {
html += '<div class="empty-msg">鱼篓是空的</div>';
} else {
// Rarity priority (higher sorts first)
const rarityPriority = {
'legendary': 5,
'epic': 4,
'rare': 3,
'uncommon': 2,
'common': 1,
'protected': 6,
'trash': 0
};
// Sort by rarity, high to low
const sortedItems = [...fishBasketItems].sort((a, b) => {
const rarityA = foodConfig[a.id].rarity;
const rarityB = foodConfig[b.id].rarity;
return rarityPriority[rarityB] - rarityPriority[rarityA];
});
sortedItems.forEach((item, index) => {
const food = foodConfig[item.id];
html += `
<div class="fish-item">
<img src="${item.id}.jpg" alt="${item.name}">
<span>${item.name} <span style="color: ${getRarityColor(food.rarity)}; font-size: 12px;">[${food.rarityText}]</span></span>
</div>
`;
});
}
tooltip.innerHTML = html;
// Keep the feed selector in sync
updateFeedSelector();
}
// Color for each rarity tier
function getRarityColor(rarity) {
const colors = {
'common': '#808080',
'uncommon': '#4CAF50',
'rare': '#2196F3',
'epic': '#9C27B0',
'legendary': '#FF9800',
'trash': '#8B4513',
'protected': '#E91E63'
};
return colors[rarity] || '#fff';
}
// 更新喂食选择框
function updateFeedSelector() {
const select = document.getElementById('feedSelect');
const confirmBtn = document.getElementById('confirmFeedBtn');
select.innerHTML = '<option value="">-- 请选择 --</option>';
fishBasketItems.forEach((item, index) => {
const food = foodConfig[item.id];
const option = document.createElement('option');
option.value = index;
option.textContent = `${item.name} [${food.rarityText}] (+${food.score}分)`;
select.appendChild(option);
});
confirmBtn.disabled = true;
}
// 鱼篓悬浮显示
const fishBasket = document.getElementById('fishBasket');
const basketTooltip = document.getElementById('basketTooltip');
fishBasket.addEventListener('mouseenter', () => {
basketTooltip.classList.add('show');
});
fishBasket.addEventListener('mouseleave', () => {
basketTooltip.classList.remove('show');
});
// 点击鱼篓打开喂食选择框
fishBasket.addEventListener('click', () => {
if (!isPlaying && !isDead && !isFishing) {
openFeedSelector();
}
});
// 打开喂食选择框
function openFeedSelector() {
if (fishBasketItems.length === 0) {
alert('鱼篓是空的!先去钓鱼吧。');
return;
}
document.getElementById('feedSelector').classList.add('show');
basketTooltip.classList.remove('show');
}
// 关闭喂食选择框
function closeFeedSelector() {
document.getElementById('feedSelector').classList.remove('show');
}
// 选择变化时启用按钮
document.getElementById('feedSelect').addEventListener('change', (e) => {
document.getElementById('confirmFeedBtn').disabled = e.target.value === '';
});
// 当前待处理的盲盒索引
let pendingBoxIndex = -1;
// 确认喂食
function confirmFeed() {
const select = document.getElementById('feedSelect');
const index = parseInt(select.value);
if (isNaN(index) || index < 0 || index >= fishBasketItems.length) {
return;
}
let item = fishBasketItems[index];
// 如果是盲盒(14),必须先打开完成华容道挑战
if (item.id === 14) {
pendingBoxIndex = index;
closeFeedSelector();
openMysteryBox();
} else {
processFeeding(item, index);
}
}
// 打开神秘包裹
function openMysteryBox() {
// 随机选择1-13的鱼(不包括垃圾7-9)
const validFishIds = [1, 2, 3, 4, 5, 6, 10, 11, 12, 13];
puzzleFishId = validFishIds[Math.floor(Math.random() * validFishIds.length)];
const food = foodConfig[puzzleFishId];
// 显示开盲盒结果和挑战按钮
catchImage.src = `${puzzleFishId}.jpg`;
catchTitle.textContent = `🎁 盲盒开出了 ${food.name}!`;
catchDescription.innerHTML = `完成华容道拼图即可获得这条鱼!<br>限时5分钟,准备好了吗?`;
catchRarity.textContent = food.rarityText;
catchRarity.className = `rarity ${food.rarity}`;
// 显示挑战按钮,隐藏关闭按钮
document.getElementById('challengeButtons').style.display = 'flex';
document.getElementById('catchCloseBtn').style.display = 'none';
catchNotification.classList.add('show');
}
// 接受挑战
function acceptChallenge() {
catchNotification.classList.remove('show');
document.getElementById('challengeButtons').style.display = 'none';
// 从鱼篓移除盲盒
if (pendingBoxIndex >= 0) {
fishBasketItems.splice(pendingBoxIndex, 1);
updateBasketUI();
pendingBoxIndex = -1;
}
// 开始华容道游戏
startPuzzleGame();
}
// 放弃挑战
function declineChallenge() {
catchNotification.classList.remove('show');
document.getElementById('challengeButtons').style.display = 'none';
pendingBoxIndex = -1;
// 显示放弃提示
setTimeout(() => {
alert('你放弃了挑战,盲盒还在鱼篓里哦!');
}, 300);
}
// 处理喂食逻辑
function processFeeding(item, index) {
// 从鱼篓移除
fishBasketItems.splice(index, 1);
updateBasketUI();
// 关闭选择框
closeFeedSelector();
// 检查是否是垃圾
if (trashIds.includes(item.id)) {
// 喂了垃圾,鲨鱼死亡
setTimeout(() => {
isDead = true;
animal.src = 'shark2.jpg';
deathMessage.style.display = 'flex';
}, 500);
} else {
// 正常喂食鲨鱼
fedFoods.push(item.id);
feedCount++;
// 累加积分
const food = foodConfig[item.id];
feedScore += food.score || 0;
// 更新显示(显示当前积分)
feedCountEl.textContent = `${feedCount} (${feedScore}分)`;
playVideo();
}
}
// 结束钓鱼
function endFishing(success) {
clearInterval(fishingInterval);
isFishing = false;
fishingGame.classList.remove('active');
if (success) {
// 钓到了!显示食物
showCatchNotification(currentFoodId);
} else {
// 失败了,鱼跑了
checkAchievements(false, false, currentFoodId);
alert('鱼跑了!再试一次吧!');
}
}
// 根据积分获取段位评价
function getRankByScore(score) {
if (score >= 300) {
return { rank: '万事不挂怀', color: '#FF1493', desc: '完美的盛宴!鲨鱼心满意足!' };
} else if (score >= 200) {
return { rank: '香螺酢荐一欢呼', color: '#FF69B4', desc: '极致的美味!鲨鱼欢呼雀跃!' };
} else if (score >= 150) {
return { rank: '钟鸣鼎食', color: '#FFD700', desc: '豪华的盛宴!鲨鱼非常满意!' };
} else if (score >= 100) {
return { rank: '酒足饭饱', color: '#FFA500', desc: '丰盛的一餐!鲨鱼吃得很满足!' };
} else if (score >= 70) {
return { rank: '适腹充肠', color: '#90EE90', desc: '不错的食物,鲨鱼吃饱了。' };
} else if (score >= 50) {
return { rank: '半饱之度', color: '#87CEEB', desc: '还算可以,鲨鱼半饱。' };
} else if (score >= 30) {
return { rank: '聊以慰饥', color: '#B0C4DE', desc: '勉强填饱肚子。' };
} else {
return { rank: '食不尽味', color: '#808080', desc: '食之无味,鲨鱼不太满意。' };
}
}
// 根据食物选择结果图片
function selectResultByFood(foodId) {
const rand = Math.random();
// 5%概率出现隐藏款
if (rand < 0.05) {
return '三.png';
}
// 根据食物类型选择结果
switch(foodId) {
case 1: // 沙丁鱼 -> 一.png 或 四.png
return Math.random() < 0.5 ? '一.png' : '四.png';
case 2: // 鳕鱼 -> 二.png
return '二.png';
case 3: // 金枪鱼 -> 五.png 或 二.png
return Math.random() < 0.5 ? '五.png' : '二.png';
case 4: // 稀有贝壳 -> 四.png
return '四.png';
case 5: // 普通贝壳 -> 一.png
return '一.png';
case 6: // 章鱼 -> 五.png
return '五.png';
default:
return '一.png';
}
}
// 播放喂食动画
function playVideo() {
isPlaying = true;
animal.style.display = 'none';
animalVideo.style.display = 'block';
animalVideo.currentTime = 0;
animalVideo.play();
setTimeout(() => {
animalVideo.pause();
animalVideo.style.display = 'none';
animal.style.display = 'block';
isPlaying = false;
if (feedCount >= 3) {
// 获取段位评价
const rankInfo = getRankByScore(feedScore);
// 保存段位信息到sessionStorage
sessionStorage.setItem('feedScore', feedScore.toString());
sessionStorage.setItem('feedRank', rankInfo.rank);
sessionStorage.setItem('feedRankColor', rankInfo.color);
sessionStorage.setItem('feedRankDesc', rankInfo.desc);
const combination = fedFoods.sort().join('-');
let resultMap = JSON.parse(sessionStorage.getItem('resultMap') || '{}');
if (!resultMap[combination]) {
// 根据最后一次喂食的食物选择结果
const lastFood = fedFoods[fedFoods.length - 1];
resultMap[combination] = selectResultByFood(lastFood);
sessionStorage.setItem('resultMap', JSON.stringify(resultMap));
}
const resultImage = resultMap[combination];
sessionStorage.setItem('currentResult', resultImage);
// 检查特殊成就
if (resultImage === '三.png' && !unlockedFishingAchievements.has('琉璃梦幻紫层霄水晶坷垃')) {
showAchievement('琉璃梦幻紫层霄水晶坷垃');
}
if (resultImage === '四.png' && !unlockedFishingAchievements.has('趣...多多?')) {
showAchievement('趣...多多?');
}
// 解锁成就
let unlocked = JSON.parse(sessionStorage.getItem('unlockedAchievements') || '[]');
if (!unlocked.includes(resultImage)) {
unlocked.push(resultImage);
sessionStorage.setItem('unlockedAchievements', JSON.stringify(unlocked));
}
// 显示段位评价弹窗
showRankEvaluation(feedScore, rankInfo);
// 重置数据
feedCount = 0;
fedFoods = [];
feedScore = 0;
feedCountEl.textContent = '0 (0分)';
loadAchievements();
}
}, 2000);
}
// 显示成就通知
function showAchievement(achievementText) {
if (unlockedFishingAchievements.has(achievementText)) {
return; // 已解锁的成就不再显示
}
unlockedFishingAchievements.add(achievementText);
sessionStorage.setItem('fishingAchievements', JSON.stringify([...unlockedFishingAchievements]));
achievementName.textContent = achievementText;
achievementNotification.classList.add('show');
setTimeout(() => {
achievementNotification.classList.remove('show');
}, 4000);
}
// 显示奖励盲盒通知
function showBonusBoxNotification() {
catchImage.src = '14.jpg';
catchTitle.textContent = '🎁 奖励!钓鱼达人礼包!';
catchDescription.textContent = `恭喜你成功钓鱼${totalSuccess}次!获得一个神秘包裹作为奖励!点击关闭领取~`;
catchRarity.textContent = '传说';
catchRarity.className = 'rarity legendary';
// 显示关闭按钮,隐藏挑战按钮
document.getElementById('catchCloseBtn').style.display = 'block';
document.getElementById('challengeButtons').style.display = 'none';
// 设置关闭后的回调
window.bonusBoxPending = true;
catchNotification.classList.add('show');
}
// 检查成就
function checkAchievements(isSuccess, isTrash, foodId = 0) {
// 记录钓鱼尝试
totalAttempts++;
sessionStorage.setItem('totalAttempts', totalAttempts.toString());
// 记录最近10次
recentAttempts.push(isSuccess ? 1 : 0);
if (recentAttempts.length > 10) {
recentAttempts.shift();
}
sessionStorage.setItem('recentAttempts', JSON.stringify(recentAttempts));
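// (Editorial note: recentAttempts is a fixed-size sliding window - push the
// newest result, then shift off the oldest once the length exceeds 10.)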
// 检查惠比寿成就(10次中7次成功)
if (recentAttempts.length === 10) {
const recentSuccessCount = recentAttempts.reduce((a, b) => a + b, 0);
if (recentSuccessCount >= 7 && !unlockedFishingAchievements.has('惠比寿')) {
showAchievement('惠比寿');
}
}
// 检查涅柔斯成就(100次中80次成功)
if (totalAttempts >= 100) {
const successRate = totalSuccess / totalAttempts;
if (successRate >= 0.8 && !unlockedFishingAchievements.has('涅柔斯')) {
showAchievement('涅柔斯');
}
}
if (isSuccess) {
consecutiveSuccess++;
consecutiveFails = 0;
totalSuccess++;
// 记录钓到的食物(不包括垃圾7-9和盲盒14)
const validFoodIds = [1, 2, 3, 4, 5, 6, 10, 11, 12, 13];
if (validFoodIds.includes(foodId)) {
caughtFoods.add(foodId);
sessionStorage.setItem('caughtFoods', JSON.stringify([...caughtFoods]));
// 首次钓上金枪鱼
if (foodId === 3 && !unlockedFishingAchievements.has('老人与海')) {
showAchievement('老人与海');
}
// 集齐所有食物(10种有效鱼类)
if (caughtFoods.size === 10 && !unlockedFishingAchievements.has('大海之主')) {
showAchievement('大海之主');
}
}
if (!isTrash) {
consecutiveTrash = 0;
}
// 连续三次成功
if (consecutiveSuccess === 3 && !unlockedFishingAchievements.has('捕鱼能手')) {
showAchievement('捕鱼能手');
}
// 累计十次成功
if (totalSuccess === 10 && !unlockedFishingAchievements.has('河边蓑笠翁')) {
showAchievement('河边蓑笠翁');
}
// 每钓10条送一个盲盒
if (totalSuccess > 0 && totalSuccess % 10 === 0) {
setTimeout(() => {
showBonusBoxNotification();
}, 3000);
}
// 连续三次垃圾
if (isTrash) {
consecutiveTrash++;
if (consecutiveTrash === 3 && !unlockedFishingAchievements.has('到底是谁在乱扔垃圾')) {
showAchievement('到底是谁在乱扔垃圾');
}
}
} else {
consecutiveFails++;
consecutiveSuccess = 0;
consecutiveTrash = 0;
totalFails++;
// 首次金枪鱼逃跑
if (foodId === 3 && !unlockedFishingAchievements.has('时光啊时光...')) {
showAchievement('时光啊时光...');
}
// 连续五次失败
if (consecutiveFails === 5 && !unlockedFishingAchievements.has('空军上尉')) {
showAchievement('空军上尉');
}
// 连续十次失败
if (consecutiveFails === 10 && !unlockedFishingAchievements.has('空军上校')) {
showAchievement('空军上校');
}
// 累计五十次失败
if (totalFails === 50 && !unlockedFishingAchievements.has('将!军!')) {
showAchievement('将!军!');
}
}
// 更新成就面板
renderAchievementsPanel();
}
// 生成背景飘过的食物
function createFloatingFood() {
if (isDead) return;
const food = document.createElement('img');
const foodId = Math.floor(Math.random() * 6) + 1; // 1-6,不包括垃圾
food.src = `${foodId}.jpg`;
food.className = 'floating-food';
const randomY = Math.random() * (window.innerHeight - 100);
food.style.top = randomY + 'px';
food.style.right = '-60px';
const duration = 15 + Math.random() * 10; // 15-25秒,缓慢飘过
food.style.animationDuration = duration + 's';
document.body.appendChild(food);
food.addEventListener('animationend', () => {
food.remove();
});
}
// 定期生成飘过的食物
setInterval(createFloatingFood, 3000);
createFloatingFood();
// 键盘控制
document.addEventListener('keydown', (e) => {
if (e.code === 'Space' && isFishing) {
e.preventDefault();
spacePressed = true;
}
});
document.addEventListener('keyup', (e) => {
if (e.code === 'Space') {
e.preventDefault();
spacePressed = false;
}
});
// ========== 华容道拼图游戏 ==========
let puzzleTimer = null;
let puzzleTimeLeft = 300; // 5分钟
let puzzleFishId = 0;
let puzzlePieces = []; // 0-7为图片块,8为空格
let emptyPos = 8; // 空格位置
let puzzleStarted = false;
// 启动拼图游戏
function startPuzzleGame() {
puzzleStarted = true;
// 初始化华容道
initSlidingPuzzle(puzzleFishId);
// 显示游戏界面
document.getElementById('puzzleGame').classList.add('show');
document.getElementById('puzzlePreviewImg').src = `${puzzleFishId}.jpg`;
// 启动计时器(5分钟)
puzzleTimeLeft = 300;
updateTimerDisplay();
puzzleTimer = setInterval(() => {
puzzleTimeLeft--;
updateTimerDisplay();
if (puzzleTimeLeft <= 0) {
endPuzzleGame(false);
}
}, 1000);
}
// 更新计时器显示
function updateTimerDisplay() {
const timerEl = document.getElementById('puzzleTimer');
timerEl.textContent = puzzleTimeLeft;
if (puzzleTimeLeft <= 10) {
timerEl.classList.add('warning');
} else {
timerEl.classList.remove('warning');
}
}
// 初始化华容道拼图
function initSlidingPuzzle(fishId) {
const container = document.getElementById('puzzleContainer');
container.innerHTML = '';
// 创建9个位置(0-7为图片块,8为空格)
puzzlePieces = [0, 1, 2, 3, 4, 5, 6, 7, 8];
emptyPos = 8;
// 打乱拼图(通过模拟移动确保可解)
shuffleSlidingPuzzle();
// 渲染拼图
renderSlidingPuzzle(fishId);
}
// 打乱华容道(通过模拟移动)
function shuffleSlidingPuzzle() {
// 模拟100次随机移动
for (let i = 0; i < 100; i++) {
const neighbors = getNeighbors(emptyPos);
const randomNeighbor = neighbors[Math.floor(Math.random() * neighbors.length)];
// 交换空格和随机相邻块
[puzzlePieces[emptyPos], puzzlePieces[randomNeighbor]] =
[puzzlePieces[randomNeighbor], puzzlePieces[emptyPos]];
emptyPos = randomNeighbor;
}
}
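// Editorial sketch (defined but never called): shuffling by simulated moves
// above always leaves the board solvable. A classic alternative check is the
// inversion-count test - a 3x3 board with one blank is solvable iff the number
// of inversions among the non-blank tiles is even.
function isSolvablePuzzle(pieces) {
const tiles = pieces.filter(p => p !== 8); // drop the blank (8)
let inversions = 0;
for (let i = 0; i < tiles.length; i++) {
for (let j = i + 1; j < tiles.length; j++) {
if (tiles[i] > tiles[j]) inversions++;
}
}
return inversions % 2 === 0;
}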
// 获取相邻位置
function getNeighbors(pos) {
const neighbors = [];
const row = Math.floor(pos / 3);
const col = pos % 3;
if (row > 0) neighbors.push(pos - 3); // 上
if (row < 2) neighbors.push(pos + 3); // 下
if (col > 0) neighbors.push(pos - 1); // 左
if (col < 2) neighbors.push(pos + 1); // 右
return neighbors;
}
// 渲染华容道拼图
function renderSlidingPuzzle(fishId) {
const container = document.getElementById('puzzleContainer');
container.innerHTML = '';
puzzlePieces.forEach((pieceIndex, position) => {
const piece = document.createElement('div');
piece.className = 'puzzle-piece';
piece.dataset.position = position;
piece.dataset.pieceIndex = pieceIndex;
if (pieceIndex === 8) {
// 空格
piece.classList.add('empty');
} else {
// 计算背景位置
const row = Math.floor(pieceIndex / 3);
const col = pieceIndex % 3;
piece.style.backgroundImage = `url('${fishId}.jpg')`;
piece.style.backgroundPosition = `${-col * 100}px ${-row * 100}px`;
piece.addEventListener('click', () => handleSlidingClick(position));
}
container.appendChild(piece);
});
}
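// Note (editorial): the backgroundPosition math above assumes each tile renders
// at 100x100px, so the full photo spans 300x300 across the 3x3 grid; piece k
// shows the image region at column (k % 3), row (Math.floor(k / 3)).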
// 处理华容道点击
function handleSlidingClick(position) {
// 检查是否与空格相邻
const neighbors = getNeighbors(emptyPos);
if (neighbors.includes(position)) {
// 交换点击的块和空格
[puzzlePieces[emptyPos], puzzlePieces[position]] =
[puzzlePieces[position], puzzlePieces[emptyPos]];
emptyPos = position;
// 重新渲染
renderSlidingPuzzle(puzzleFishId);
// 检查是否完成
if (checkSlidingComplete()) {
endPuzzleGame(true);
}
}
}
// 检查华容道是否完成
function checkSlidingComplete() {
for (let i = 0; i < puzzlePieces.length; i++) {
if (puzzlePieces[i] !== i) {
return false;
}
}
return true;
}
// 放弃挑战
function giveUpPuzzle() {
if (confirm('确定要放弃挑战吗?鱼会跑掉哦!')) {
endPuzzleGame(false, true);
}
}
// 结束拼图游戏
function endPuzzleGame(success, gaveUp = false) {
clearInterval(puzzleTimer);
puzzleStarted = false;
document.getElementById('puzzleGame').classList.remove('show');
const resultEl = document.getElementById('puzzleResult');
const resultImg = document.getElementById('puzzleResultImg');
const resultTitle = document.getElementById('puzzleResultTitle');
const resultDesc = document.getElementById('puzzleResultDesc');
const resultBtn = document.getElementById('puzzleResultBtn');
if (success) {
const food = foodConfig[puzzleFishId];
resultImg.src = `${puzzleFishId}.jpg`;
resultTitle.textContent = `🎉 恭喜获得 ${food.name}!`;
resultDesc.textContent = food.description;
resultBtn.className = 'success-btn';
resultBtn.textContent = '放入鱼篓';
resultBtn.onclick = () => {
closePuzzleResult();
addToBasket(puzzleFishId);
};
} else {
resultImg.src = '14.jpg';
resultTitle.textContent = gaveUp ? '😢 放弃挑战' : '⏰ 时间到!';
resultDesc.textContent = gaveUp ? '你放弃了挑战,鱼跑掉了...' : '很遗憾,拼图未能完成,鱼跑掉了...';
resultBtn.className = 'fail-btn';
resultBtn.textContent = '确定';
resultBtn.onclick = closePuzzleResult;
}
resultEl.classList.add('show');
}
// 关闭拼图结果
function closePuzzleResult() {
document.getElementById('puzzleResult').classList.remove('show');
}
// 显示段位评价弹窗
function showRankEvaluation(score, rankInfo) {
const modal = document.getElementById('rankEvaluation');
const rankName = document.getElementById('rankName');
const rankScore = document.getElementById('rankScore');
const rankDesc = document.getElementById('rankDesc');
const rankStars = document.getElementById('rankStars');
// 设置内容
rankName.textContent = rankInfo.rank;
rankName.style.color = rankInfo.color;
rankScore.textContent = `总积分: ${score}`;
rankDesc.textContent = rankInfo.desc;
// 设置边框颜色
modal.style.borderColor = rankInfo.color;
// Star count derived from the score
let starCount = 1;
if (score >= 200) starCount = 5;
else if (score >= 100) starCount = 4;
else if (score >= 50) starCount = 3;
else if (score >= 30) starCount = 2;
// 生成星星
let starsHtml = '';
for (let i = 0; i < starCount; i++) {
starsHtml += `<span class="rank-star">⭐</span>`;
}
rankStars.innerHTML = starsHtml;
// 显示弹窗
setTimeout(() => {
modal.classList.add('show');
}, 300);
}
// 关闭段位评价弹窗
function closeRankEvaluation() {
const modal = document.getElementById('rankEvaluation');
modal.classList.remove('show');
// 延迟打开新窗口,让弹窗关闭动画完成
setTimeout(() => {
window.open('pro.html', '_blank');
}, 400);
}
</script>
</body>
</html>
|
2301_80381209/tks_kiro_kjdlxstr
|
choose.html
|
HTML
|
unknown
| 91,737
|
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>结果展示</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: Arial, sans-serif;
min-height: 100vh;
background: url('背景2.jpg') no-repeat center center fixed;
background-size: cover;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 20px;
}
h1 {
color: white;
margin-bottom: 20px;
text-shadow: 2px 2px 4px rgba(0,0,0,0.5);
}
.description {
background: rgba(0,0,0,0.7);
color: white;
padding: 15px 25px;
border-radius: 10px;
max-width: 600px;
text-align: center;
margin-bottom: 20px;
line-height: 1.6;
}
.result-container {
background: rgba(255,255,255,0.9);
padding: 30px;
border-radius: 15px;
box-shadow: 0 10px 30px rgba(0,0,0,0.3);
text-align: center;
max-width: 90%;
}
#resultImage {
max-width: 100%;
max-height: 400px;
border-radius: 10px;
margin-bottom: 20px;
}
.back-btn {
padding: 12px 30px;
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border: none;
border-radius: 25px;
font-size: 16px;
cursor: pointer;
transition: transform 0.3s, box-shadow 0.3s;
}
.back-btn:hover {
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(0,0,0,0.3);
}
.music-control {
position: fixed;
bottom: 20px;
right: 20px;
background: rgba(0,0,0,0.6);
color: white;
padding: 10px 20px;
border-radius: 20px;
cursor: pointer;
}
</style>
</head>
<body>
<audio id="bgMusic" loop autoplay preload="auto">
<source src="music.mp4" type="audio/mp4">
<source src="music.mp3" type="audio/mpeg">
</audio>
<h1>🎉 喂食完成!</h1>
<div class="description" id="description"></div>
<div class="result-container">
<img id="resultImage" alt="结果图片">
<br>
<button class="back-btn" onclick="goBack()">返回喂食</button>
</div>
<div class="music-control" onclick="toggleMusic()">
🎵 <span id="musicStatus">暂停音乐</span>
</div>
<script>
const bgMusic = document.getElementById('bgMusic');
const musicStatus = document.getElementById('musicStatus');
const descriptionEl = document.getElementById('description');
const descriptions = {
'一.png': '吃沙丁鱼、鲱鱼、鲭鱼后,这些是油性非常丰富的鱼类,肌肉组织呈深色。鲨鱼消化它们后,排出的粪便会偏向深绿色。',
'二.png': '吃金枪鱼后:金枪鱼的鱼肉呈深红色或粉红色,因为它肌肉中含有大量的肌红蛋白。消化后,粪便可能带有偏红或偏橙的色调,尤其是在大量进食后。',
'三.png': '🎊 恭喜开到隐藏款!',
'四.png': '吃鳕鱼后:鳕鱼是典型的白色鱼肉鱼类,脂肪含量较低,肌肉色素也较少。消化后产生的粪便颜色可能会浅一些,比如浅褐色或黄褐色。',
'五.png': '吃金枪鱼后:金枪鱼的鱼肉呈深红色或粉红色,因为它肌肉中含有大量的肌红蛋白。消化后,粪便可能带有偏红或偏橙的色调,尤其是在大量进食后。'
};
const resultImage = sessionStorage.getItem('currentResult');
if (resultImage) {
document.getElementById('resultImage').src = resultImage;
descriptionEl.textContent = descriptions[resultImage] || '';
} else {
const rand = Math.random();
let selectedImage;
if (rand < 0.2) selectedImage = '三.png';
else if (rand < 0.4) selectedImage = '一.png';
else if (rand < 0.6) selectedImage = '二.png';
else if (rand < 0.8) selectedImage = '四.png';
else selectedImage = '五.png';
document.getElementById('resultImage').src = selectedImage;
descriptionEl.textContent = descriptions[selectedImage] || '';
}
// Set the music source and start playback automatically
bgMusic.src = 'music.mp4';
bgMusic.volume = 0.5;
// Try to autoplay
function tryPlayMusic() {
bgMusic.play().then(() => {
musicStatus.textContent = '暂停音乐';
}).catch(() => {
musicStatus.textContent = '点击播放音乐';
// Fall back to playing after the first user interaction
document.body.addEventListener('click', function playOnClick() {
bgMusic.play().then(() => {
musicStatus.textContent = '暂停音乐';
});
document.body.removeEventListener('click', playOnClick);
});
});
}
tryPlayMusic();
function toggleMusic() {
if (bgMusic.paused) {
bgMusic.play();
musicStatus.textContent = '暂停音乐';
} else {
bgMusic.pause();
musicStatus.textContent = '播放音乐';
}
}
function goBack() {
window.close();
window.location.href = 'choose.html';
}
</script>
</body>
</html>
|
2301_80381209/tks_kiro_kjdlxstr
|
pro.html
|
HTML
|
unknown
| 6,045
|
// DeepSeek API proxy server
// Run with: node proxy-server.js
const http = require('http');
const https = require('https');
const DEEPSEEK_API_KEY = 'sk-1c2957f7804a4dc4bde7756acfcaec86';
const PORT = 3000;
const server = http.createServer((req, res) => {
// Set CORS headers
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('Access-Control-Allow-Methods', 'POST, OPTIONS');
res.setHeader('Access-Control-Allow-Headers', 'Content-Type');
// Handle OPTIONS preflight requests
if (req.method === 'OPTIONS') {
res.writeHead(200);
res.end();
return;
}
// Only handle POST requests to /api/chat
if (req.method === 'POST' && req.url === '/api/chat') {
let body = '';
req.on('data', chunk => {
body += chunk.toString();
});
req.on('end', () => {
try {
const requestData = JSON.parse(body);
// Prepare the payload for DeepSeek
const deepseekData = JSON.stringify({
model: 'deepseek-chat',
messages: requestData.messages,
max_tokens: requestData.max_tokens || 200,
temperature: requestData.temperature || 0.8,
stream: false
});
// Call the DeepSeek API
const options = {
hostname: 'api.deepseek.com',
path: '/chat/completions',
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${DEEPSEEK_API_KEY}`,
'Content-Length': Buffer.byteLength(deepseekData)
}
};
const apiReq = https.request(options, (apiRes) => {
let responseData = '';
apiRes.on('data', chunk => {
responseData += chunk;
});
apiRes.on('end', () => {
res.writeHead(apiRes.statusCode, { 'Content-Type': 'application/json' });
res.end(responseData);
});
});
apiReq.on('error', (error) => {
console.error('DeepSeek API错误:', error);
res.writeHead(500, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({
error: 'API调用失败',
message: error.message
}));
});
apiReq.write(deepseekData);
apiReq.end();
} catch (error) {
console.error('请求处理错误:', error);
res.writeHead(400, { 'Content-Type': 'application/json' });
res.end(JSON.stringify({ error: '请求格式错误' }));
}
});
} else {
res.writeHead(404);
res.end('Not Found');
}
});
server.listen(PORT, () => {
console.log(`🚀 DeepSeek代理服务器运行在 http://localhost:${PORT}`);
console.log(`📡 前端请求地址: http://localhost:${PORT}/api/chat`);
});
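// Example front-end call (editorial sketch; assumes the proxy is running
// locally on port 3000):
// fetch('http://localhost:3000/api/chat', {
//     method: 'POST',
//     headers: { 'Content-Type': 'application/json' },
//     body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello' }] })
// }).then(r => r.json()).then(console.log);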
|
2301_80381209/tks_kiro_kjdlxstr
|
proxy-server.js
|
JavaScript
|
unknown
| 3,266
|
// Simple script to check whether npm is installed correctly
console.log("=== npm 安装测试 ===\n");
// Query the npm version
const { execSync } = require('child_process');
try {
const npmVersion = execSync('npm --version', { encoding: 'utf-8' }).trim();
console.log(`✅ npm 已安装成功!`);
console.log(`📦 npm 版本: ${npmVersion}`);
const nodeVersion = execSync('node --version', { encoding: 'utf-8' }).trim();
console.log(`🟢 Node.js 版本: ${nodeVersion}`);
} catch (error) {
console.log("❌ npm 未安装或无法访问");
console.log("请访问 https://nodejs.org 下载安装 Node.js(包含 npm)");
}
|
2301_80381209/tks_kiro_kjdlxstr
|
test-npm.js
|
JavaScript
|
unknown
| 648
|
<?php
function getFileNames($filePath) {
$fileNames = [];
if (file_exists($filePath)) {
$file = fopen($filePath, 'r');
if ($file) {
while (($line = fgets($file))!== false) {
$fileNames[] = trim($line);
}
fclose($file);
}
}
return $fileNames;
}
function arrayDifference($array1, $array2) {
return array_diff($array1, $array2);
}
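// Note (editorial): array_diff($array1, $array2) keeps the values of $array1
// that are absent from $array2, so the (old, new) call below lists deleted
// files; swapping the arguments would list newly created files instead.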
function writeDiffToFile($diffFileNames, $outputFilePath) {
// Ensure the output directory exists, creating it if necessary
$outputFileDir = dirname($outputFilePath);
if (!is_dir($outputFileDir)) {
mkdir($outputFileDir, 0777, true);
}
$fileHandle = fopen($outputFilePath, 'w');
if ($fileHandle) {
foreach ($diffFileNames as $fileName) {
fwrite($fileHandle, $fileName.PHP_EOL);
}
fclose($fileHandle);
}
}
while(true) {
// Paths of the old_files and new_files lists
$oldFilesPath = 'file_span/old_files';
$newFilesPath = 'file_span/new_files';
// Output file path
$outputFilePath = 'file_span/diff_files';
// Read the filename arrays from old_files and new_files
$oldFileNames = getFileNames($oldFilesPath);
$newFileNames = getFileNames($newFilesPath);
// Compute the filename difference (entries of old_files missing from new_files)
$diffFileNames = arrayDifference($oldFileNames, $newFileNames);
// Write the differing filenames to the output file
writeDiffToFile($diffFileNames, $outputFilePath);
sleep(5); // keep-alive loop (AWD "undying" persistence script)
}
?>
|
2301_80867077/awd_waf_5_span
|
Difference.php
|
PHP
|
unknown
| 1,576
|
<?php
function scanDirectory($directory, $outputFile) {
// Ensure the output directory exists, creating it if necessary
$fileSpanDir = dirname($outputFile);
if (!is_dir($fileSpanDir)) {
mkdir($fileSpanDir, 0777, true);
}
$fileHandle = fopen($outputFile, 'w'); // open in write mode; an existing file is overwritten
scanRecursive($directory, $fileHandle);
fclose($fileHandle);
}
function scanRecursive($directory, $fileHandle) {
$files = scandir($directory);
foreach ($files as $file) {
if ($file == '.' || $file == '..') {
continue;
}
$path = $directory.'/'.$file;
if (is_dir($path)) {
scanRecursive($path, $fileHandle);
} else {
fwrite($fileHandle, $path.PHP_EOL);
}
}
}
// Invoke the scanner; replace '/var/www/html/admin' with the actual target directory
while (true) {
scanDirectory('/var/www/html/admin', 'file_span/new_files');
// 休眠 10 秒
sleep(10); // keep-alive loop (this is an "undying" persistence script)
}
?>
|
2301_80867077/awd_waf_5_span
|
file_span_new.php
|
PHP
|
unknown
| 1,086
|
<?php
function scanDirectory($directory, $outputFile) {
// Ensure the output directory exists, creating it if necessary
$fileSpanDir = dirname($outputFile);
if (!is_dir($fileSpanDir)) {
mkdir($fileSpanDir, 0777, true);
}
// Create the output file if it does not exist yet
if (!file_exists($outputFile)) {
touch($outputFile);
}
$fileHandle = fopen($outputFile, 'w');
scanRecursive($directory, $fileHandle);
fclose($fileHandle);
}
function scanRecursive($directory, $fileHandle) {
$files = scandir($directory);
foreach ($files as $file) {
if ($file == '.' || $file == '..') {
continue;
}
$path = $directory.'/'.$file;
if (is_dir($path)) {
scanRecursive($path, $fileHandle);
} else {
fwrite($fileHandle, $path.PHP_EOL);
}
}
}
// Invoke the scanner; replace the target directory below with your own
scanDirectory('/var/www/html/admin', 'file_span/old_files');
?>
|
2301_80867077/awd_waf_5_span
|
file_span_old.php
|
PHP
|
unknown
| 1,078
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Different Files</title>
<style>
table {
width: 100%;
border-collapse: collapse;
}
table, th, td {
border: 1px solid black;
}
th, td {
padding: 8px;
text-align: left;
}
</style>
<meta http-equiv="refresh" content="10">
</head>
<body>
<h2>不同文件</h2>
<table id="diffTable">
<thead>
<tr>
<th>文件名</th>
</tr>
</thead>
<tbody>
<?php
$filePath = '/var/www/html/filespan/file_span/diff_files';
if (file_exists($filePath)) {
$file = fopen($filePath, 'r');
if ($file) {
while (($line = fgets($file))!== false) {
$fileName = trim($line);
echo "<tr><td>$fileName</td></tr>";
}
fclose($file);
}
}
?>
</tbody>
</table>
</body>
</html>
|
2301_80867077/awd_waf_5_span
|
span.php
|
PHP
|
unknown
| 1,095
|
"""undoom-pdf-mcp: PDF转换工具MCP服务器
这是一个基于MCP (Model Context Protocol) 的PDF转换工具服务器,
集成了PDF转图片、Office文件转PDF等功能。
"""
__version__ = "0.2.3"
__author__ = "undoom"
__email__ = "kaikaihuhu666@163.com"
|
2301_80863610/undoom_pdf_mcp
|
undoom_pdf_mcp/__init__.py
|
Python
|
mit
| 265
|
#!/usr/bin/env python3
"""
__main__.py - supports running via python -m undoom_pdf_mcp
"""
import asyncio
import sys
def main():
"""同步入口点函数"""
# 导入异步main函数
from .main import main as async_main
# 运行异步main函数
try:
asyncio.run(async_main())
except KeyboardInterrupt:
print("\n服务器已停止")
sys.exit(0)
except Exception as e:
print(f"服务器运行出错: {e}")
sys.exit(1)
if __name__ == "__main__":
main()
|
2301_80863610/undoom_pdf_mcp
|
undoom_pdf_mcp/__main__.py
|
Python
|
mit
| 521
|
#!/usr/bin/env python3
"""
PDF conversion MCP server
Provides PDF-to-JPG, batch PDF-to-image, and Office-to-PDF conversion tools
"""
import asyncio
import json
import os
import tempfile
import gc
from typing import Any, Dict, List, Optional
from pathlib import Path
import fitz # PyMuPDF
from PIL import Image
try:
import win32com.client
WIN32_AVAILABLE = True
except ImportError:
WIN32_AVAILABLE = False
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
Resource,
Tool,
TextContent,
ImageContent,
EmbeddedResource,
LoggingLevel
)
server = Server("undoom-pdf-mcp")
class PDFConverter:
"""PDF转换工具类"""
@staticmethod
def pdf_to_images(pdf_path: str, pages: Optional[List[int]] = None,
quality: float = 2.0, output_dir: Optional[str] = None) -> List[str]:
"""将PDF转换为图片
Args:
pdf_path: PDF文件路径
pages: 要转换的页码列表,None表示转换所有页
quality: 图片质量倍数 (0.25, 0.5, 1.0, 2.0, 4.0)
output_dir: 输出目录,None表示使用PDF同目录
Returns:
生成的图片文件路径列表
"""
if not os.path.exists(pdf_path):
raise FileNotFoundError(f"PDF文件不存在: {pdf_path}")
# 打开PDF文档
doc = fitz.open(pdf_path)
# 设置输出目录
if output_dir is None:
base_name = os.path.splitext(os.path.basename(pdf_path))[0]
output_dir = os.path.join(os.path.dirname(pdf_path), f"{base_name}_images")
os.makedirs(output_dir, exist_ok=True)
# 确定要转换的页码
total_pages = len(doc)
if pages is None:
pages = list(range(1, total_pages + 1))
# 设置缩放矩阵
matrix = fitz.Matrix(quality, quality)
output_files = []
base_name = os.path.splitext(os.path.basename(pdf_path))[0]
try:
for page_num in pages:
if 1 <= page_num <= total_pages:
# 加载页面
page = doc.load_page(page_num - 1) # fitz使用0基索引
# 渲染为图片
pix = page.get_pixmap(matrix=matrix)
                    # Build the output filename (a one-page PDF gets no page suffix)
                    if len(pages) == 1 and total_pages == 1:
output_file = os.path.join(output_dir, f"{base_name}.jpg")
else:
output_file = os.path.join(output_dir, f"{base_name}_page_{page_num}.jpg")
# 保存图片
pix.save(output_file)
output_files.append(output_file)
# 清理内存
pix = None
page = None
gc.collect()
finally:
doc.close()
return output_files
@staticmethod
def parse_page_numbers(page_string: str) -> List[int]:
"""解析页码字符串
Args:
page_string: 页码字符串,如 "1,2,3-5,7"
Returns:
页码列表
"""
if not page_string.strip():
return []
pages = set()
for part in page_string.split(','):
part = part.strip()
if '-' in part:
start, end = map(int, part.split('-', 1))
pages.update(range(start, end + 1))
else:
pages.add(int(part))
return sorted(list(pages))
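    # Example (editorial): parse_page_numbers("1,2,3-5,7") -> [1, 2, 3, 4, 5, 7]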
@staticmethod
def batch_convert_pdfs(folder_path: str, page_settings: Dict[str, str],
quality: float = 2.0) -> Dict[str, List[str]]:
"""批量转换PDF文件
Args:
folder_path: PDF文件夹路径
page_settings: 文件名到页码设置的映射
quality: 图片质量倍数
Returns:
文件名到输出图片列表的映射
"""
results = {}
for filename, page_string in page_settings.items():
pdf_path = os.path.join(folder_path, filename)
if os.path.exists(pdf_path) and filename.lower().endswith('.pdf'):
try:
pages = PDFConverter.parse_page_numbers(page_string) if page_string else None
output_files = PDFConverter.pdf_to_images(pdf_path, pages, quality)
results[filename] = output_files
except Exception as e:
results[filename] = [f"错误: {str(e)}"]
return results
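    # Example (editorial sketch, hypothetical paths): convert pages 1-3 of one
    # file and every page of another in a single call:
    #   PDFConverter.batch_convert_pdfs("/pdfs", {"a.pdf": "1-3", "b.pdf": ""})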
@staticmethod
def encrypt_pdf(pdf_path: str, password: str, output_path: Optional[str] = None) -> str:
"""加密PDF文件
Args:
pdf_path: PDF文件路径
password: 加密密码
output_path: 输出文件路径,None表示覆盖原文件
Returns:
加密后的PDF文件路径
"""
if not os.path.exists(pdf_path):
raise FileNotFoundError(f"PDF文件不存在: {pdf_path}")
if output_path is None:
base_name = os.path.splitext(pdf_path)[0]
output_path = f"{base_name}_encrypted.pdf"
doc = fitz.open(pdf_path)
try:
# 设置加密参数
encrypt_meth = fitz.PDF_ENCRYPT_AES_256 # 使用AES-256加密
owner_pass = password # 所有者密码
user_pass = password # 用户密码
# 设置权限(允许所有操作)
permissions = fitz.PDF_PERM_ACCESSIBILITY | fitz.PDF_PERM_PRINT | fitz.PDF_PERM_COPY | fitz.PDF_PERM_ANNOTATE
# 保存加密的PDF
doc.save(output_path,
encryption=encrypt_meth,
owner_pw=owner_pass,
user_pw=user_pass,
permissions=permissions)
return output_path
finally:
doc.close()
@staticmethod
def images_to_pdf(image_paths: List[str], output_path: str,
page_size: str = "A4") -> str:
"""将多张图片合并为PDF
Args:
image_paths: 图片文件路径列表
output_path: 输出PDF文件路径
page_size: 页面大小,如"A4", "A3", "Letter"等
Returns:
生成的PDF文件路径
"""
if not image_paths:
raise ValueError("图片路径列表不能为空")
# 检查所有图片文件是否存在
for img_path in image_paths:
if not os.path.exists(img_path):
raise FileNotFoundError(f"图片文件不存在: {img_path}")
# 创建新的PDF文档
doc = fitz.open()
# 定义页面大小
page_sizes = {
"A4": fitz.paper_rect("a4"),
"A3": fitz.paper_rect("a3"),
"A5": fitz.paper_rect("a5"),
"Letter": fitz.paper_rect("letter"),
"Legal": fitz.paper_rect("legal")
}
rect = page_sizes.get(page_size.upper(), fitz.paper_rect("a4"))
try:
for img_path in image_paths:
# 打开图片
img = Image.open(img_path)
# 转换为RGB模式(如果需要)
if img.mode != 'RGB':
img = img.convert('RGB')
# 保存为临时文件
with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as temp_file:
img.save(temp_file.name, 'JPEG', quality=95)
temp_path = temp_file.name
try:
# 创建新页面
page = doc.new_page(width=rect.width, height=rect.height)
# 插入图片
img_rect = fitz.Rect(0, 0, rect.width, rect.height)
page.insert_image(img_rect, filename=temp_path, keep_proportion=True)
finally:
# 清理临时文件
if os.path.exists(temp_path):
os.unlink(temp_path)
img.close()
# 保存PDF
doc.save(output_path)
return output_path
finally:
doc.close()
@staticmethod
def single_image_to_pdf(image_path: str, output_path: Optional[str] = None,
page_size: str = "A4") -> str:
"""将单张图片转换为PDF
Args:
image_path: 图片文件路径
output_path: 输出PDF文件路径,None表示自动生成
page_size: 页面大小
Returns:
生成的PDF文件路径
"""
if output_path is None:
base_name = os.path.splitext(image_path)[0]
output_path = f"{base_name}.pdf"
return PDFConverter.images_to_pdf([image_path], output_path, page_size)
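    # Example (editorial, hypothetical path): single_image_to_pdf("photo.jpg")
    # writes photo.pdf next to the source image, on an A4 page.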
class OfficeConverter:
"""Office文件转换工具类"""
@staticmethod
def word_to_pdf(word_path: str, output_path: Optional[str] = None) -> str:
"""Word转PDF"""
if not WIN32_AVAILABLE:
raise RuntimeError("需要安装pywin32库才能转换Office文件")
if not os.path.exists(word_path):
raise FileNotFoundError(f"Word文件不存在: {word_path}")
if output_path is None:
output_path = os.path.splitext(word_path)[0] + '.pdf'
word = None
try:
word = win32com.client.Dispatch("Word.Application")
word.Visible = False
word.DisplayAlerts = False
doc = word.Documents.Open(word_path)
doc.SaveAs(output_path, FileFormat=17) # 17 = PDF格式
doc.Close()
return output_path
finally:
if word:
word.Quit()
word = None
gc.collect()
@staticmethod
def excel_to_pdf(excel_path: str, output_path: Optional[str] = None) -> str:
"""Excel转PDF"""
if not WIN32_AVAILABLE:
raise RuntimeError("需要安装pywin32库才能转换Office文件")
if not os.path.exists(excel_path):
raise FileNotFoundError(f"Excel文件不存在: {excel_path}")
if output_path is None:
output_path = os.path.splitext(excel_path)[0] + '.pdf'
excel = None
try:
excel = win32com.client.Dispatch("Excel.Application")
excel.Visible = False
excel.DisplayAlerts = False
workbook = excel.Workbooks.Open(excel_path)
workbook.ExportAsFixedFormat(0, output_path) # 0 = PDF格式
workbook.Close()
return output_path
finally:
if excel:
excel.Quit()
excel = None
gc.collect()
@staticmethod
def ppt_to_pdf(ppt_path: str, output_path: Optional[str] = None) -> str:
"""PowerPoint转PDF"""
if not WIN32_AVAILABLE:
raise RuntimeError("需要安装pywin32库才能转换Office文件")
if not os.path.exists(ppt_path):
raise FileNotFoundError(f"PowerPoint文件不存在: {ppt_path}")
if output_path is None:
output_path = os.path.splitext(ppt_path)[0] + '.pdf'
ppt = None
try:
ppt = win32com.client.Dispatch("PowerPoint.Application")
ppt.Visible = True
presentation = ppt.Presentations.Open(ppt_path)
presentation.SaveAs(output_path, 32) # 32 = PDF格式
presentation.Close()
return output_path
finally:
if ppt:
ppt.Quit()
ppt = None
gc.collect()
@staticmethod
def batch_office_to_pdf(folder_path: str, file_types: List[str] = None) -> Dict[str, str]:
"""批量转换Office文件为PDF
Args:
folder_path: 文件夹路径
file_types: 要转换的文件类型列表,如['docx', 'xlsx', 'pptx']
Returns:
原文件名到PDF文件路径的映射
"""
if file_types is None:
file_types = ['doc', 'docx', 'xls', 'xlsx', 'ppt', 'pptx']
results = {}
for filename in os.listdir(folder_path):
file_ext = filename.lower().split('.')[-1]
if file_ext in file_types:
file_path = os.path.join(folder_path, filename)
try:
if file_ext in ['doc', 'docx']:
output_path = OfficeConverter.word_to_pdf(file_path)
elif file_ext in ['xls', 'xlsx']:
output_path = OfficeConverter.excel_to_pdf(file_path)
elif file_ext in ['ppt', 'pptx']:
output_path = OfficeConverter.ppt_to_pdf(file_path)
else:
continue
results[filename] = output_path
except Exception as e:
results[filename] = f"错误: {str(e)}"
return results
@server.list_tools()
async def handle_list_tools() -> List[Tool]:
"""列出可用的工具"""
return [
Tool(
name="pdf_to_images",
description="将PDF文件转换为图片",
inputSchema={
"type": "object",
"properties": {
"pdf_path": {
"type": "string",
"description": "PDF文件的绝对路径"
},
"pages": {
"type": "string",
"description": "要转换的页码,格式如'1,2,3-5',留空转换所有页",
"default": ""
},
"quality": {
"type": "number",
"description": "图片质量倍数,可选值:0.25, 0.5, 1.0, 2.0, 4.0",
"default": 2.0
},
"output_dir": {
"type": "string",
"description": "输出目录路径,留空使用PDF同目录",
"default": ""
}
},
"required": ["pdf_path"]
}
),
Tool(
name="batch_convert_pdfs",
description="批量转换PDF文件为图片",
inputSchema={
"type": "object",
"properties": {
"folder_path": {
"type": "string",
"description": "包含PDF文件的文件夹路径"
},
"page_settings": {
"type": "object",
"description": "文件名到页码设置的映射,如{'file1.pdf': '1,2,3-5'}",
"additionalProperties": {"type": "string"}
},
"quality": {
"type": "number",
"description": "图片质量倍数",
"default": 2.0
}
},
"required": ["folder_path", "page_settings"]
}
),
Tool(
name="word_to_pdf",
description="将Word文档转换为PDF",
inputSchema={
"type": "object",
"properties": {
"word_path": {
"type": "string",
"description": "Word文件的绝对路径"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径,留空自动生成",
"default": ""
}
},
"required": ["word_path"]
}
),
Tool(
name="excel_to_pdf",
description="将Excel文档转换为PDF",
inputSchema={
"type": "object",
"properties": {
"excel_path": {
"type": "string",
"description": "Excel文件的绝对路径"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径,留空自动生成",
"default": ""
}
},
"required": ["excel_path"]
}
),
Tool(
name="ppt_to_pdf",
description="将PowerPoint文档转换为PDF",
inputSchema={
"type": "object",
"properties": {
"ppt_path": {
"type": "string",
"description": "PowerPoint文件的绝对路径"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径,留空自动生成",
"default": ""
}
},
"required": ["ppt_path"]
}
),
Tool(
name="batch_office_to_pdf",
description="批量转换Office文件为PDF",
inputSchema={
"type": "object",
"properties": {
"folder_path": {
"type": "string",
"description": "包含Office文件的文件夹路径"
},
"file_types": {
"type": "array",
"items": {"type": "string"},
"description": "要转换的文件类型列表,如['docx', 'xlsx', 'pptx']",
"default": ["doc", "docx", "xls", "xlsx", "ppt", "pptx"]
}
},
"required": ["folder_path"]
}
),
Tool(
name="get_pdf_info",
description="获取PDF文件信息",
inputSchema={
"type": "object",
"properties": {
"pdf_path": {
"type": "string",
"description": "PDF文件的绝对路径"
}
},
"required": ["pdf_path"]
}
),
Tool(
name="encrypt_pdf",
description="加密PDF文件",
inputSchema={
"type": "object",
"properties": {
"pdf_path": {
"type": "string",
"description": "PDF文件的绝对路径"
},
"password": {
"type": "string",
"description": "加密密码"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径,留空自动生成",
"default": ""
}
},
"required": ["pdf_path", "password"]
}
),
Tool(
name="images_to_pdf",
description="将多张图片合并为PDF",
inputSchema={
"type": "object",
"properties": {
"image_paths": {
"type": "array",
"items": {"type": "string"},
"description": "图片文件路径列表"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径"
},
"page_size": {
"type": "string",
"description": "页面大小,如A4、A3、Letter等",
"default": "A4"
}
},
"required": ["image_paths", "output_path"]
}
),
Tool(
name="single_image_to_pdf",
description="将单张图片转换为PDF",
inputSchema={
"type": "object",
"properties": {
"image_path": {
"type": "string",
"description": "图片文件的绝对路径"
},
"output_path": {
"type": "string",
"description": "输出PDF文件路径,留空自动生成",
"default": ""
},
"page_size": {
"type": "string",
"description": "页面大小,如A4、A3、Letter等",
"default": "A4"
}
},
"required": ["image_path"]
}
)
]
@server.call_tool()
async def handle_call_tool(name: str, arguments: Dict[str, Any]) -> List[TextContent]:
"""处理工具调用"""
try:
if name == "pdf_to_images":
pdf_path = arguments["pdf_path"]
pages_str = arguments.get("pages", "")
quality = arguments.get("quality", 2.0)
output_dir = arguments.get("output_dir", "") or None
pages = PDFConverter.parse_page_numbers(pages_str) if pages_str else None
output_files = PDFConverter.pdf_to_images(pdf_path, pages, quality, output_dir)
return [TextContent(
type="text",
text=f"成功转换PDF为图片!\n生成的图片文件:\n" + "\n".join(output_files)
)]
elif name == "batch_convert_pdfs":
folder_path = arguments["folder_path"]
page_settings = arguments["page_settings"]
quality = arguments.get("quality", 2.0)
results = PDFConverter.batch_convert_pdfs(folder_path, page_settings, quality)
result_text = "批量转换结果:\n"
for filename, output_files in results.items():
result_text += f"\n{filename}:\n"
if isinstance(output_files, list) and output_files:
if output_files[0].startswith("错误:"):
result_text += f" {output_files[0]}\n"
else:
result_text += "\n".join(f" - {f}" for f in output_files) + "\n"
return [TextContent(type="text", text=result_text)]
elif name == "word_to_pdf":
word_path = arguments["word_path"]
output_path = arguments.get("output_path", "") or None
result_path = OfficeConverter.word_to_pdf(word_path, output_path)
return [TextContent(
type="text",
text=f"成功将Word文档转换为PDF!\n输出文件:{result_path}"
)]
elif name == "excel_to_pdf":
excel_path = arguments["excel_path"]
output_path = arguments.get("output_path", "") or None
result_path = OfficeConverter.excel_to_pdf(excel_path, output_path)
return [TextContent(
type="text",
text=f"成功将Excel文档转换为PDF!\n输出文件:{result_path}"
)]
elif name == "ppt_to_pdf":
ppt_path = arguments["ppt_path"]
output_path = arguments.get("output_path", "") or None
result_path = OfficeConverter.ppt_to_pdf(ppt_path, output_path)
return [TextContent(
type="text",
text=f"成功将PowerPoint文档转换为PDF!\n输出文件:{result_path}"
)]
elif name == "batch_office_to_pdf":
folder_path = arguments["folder_path"]
file_types = arguments.get("file_types", ["doc", "docx", "xls", "xlsx", "ppt", "pptx"])
results = OfficeConverter.batch_office_to_pdf(folder_path, file_types)
result_text = "批量转换Office文件结果:\n"
for filename, output_path in results.items():
if output_path.startswith("错误:"):
result_text += f"\n{filename}: {output_path}"
else:
result_text += f"\n{filename} -> {output_path}"
return [TextContent(type="text", text=result_text)]
elif name == "get_pdf_info":
pdf_path = arguments["pdf_path"]
if not os.path.exists(pdf_path):
raise FileNotFoundError(f"PDF文件不存在: {pdf_path}")
doc = fitz.open(pdf_path)
try:
info = {
"文件路径": pdf_path,
"文件大小": f"{os.path.getsize(pdf_path) / 1024 / 1024:.2f} MB",
"页数": len(doc),
"标题": doc.metadata.get('title', '未知'),
"作者": doc.metadata.get('author', '未知'),
"创建时间": doc.metadata.get('creationDate', '未知'),
"修改时间": doc.metadata.get('modDate', '未知')
}
info_text = "PDF文件信息:\n"
for key, value in info.items():
info_text += f"{key}: {value}\n"
return [TextContent(type="text", text=info_text)]
finally:
doc.close()
elif name == "encrypt_pdf":
pdf_path = arguments["pdf_path"]
password = arguments["password"]
output_path = arguments.get("output_path", "") or None
result_path = PDFConverter.encrypt_pdf(pdf_path, password, output_path)
return [TextContent(
type="text",
text=f"成功加密PDF文件!\n输出文件:{result_path}"
)]
elif name == "images_to_pdf":
image_paths = arguments["image_paths"]
output_path = arguments["output_path"]
page_size = arguments.get("page_size", "A4")
result_path = PDFConverter.images_to_pdf(image_paths, output_path, page_size)
return [TextContent(
type="text",
text=f"成功将{len(image_paths)}张图片合并为PDF!\n输出文件:{result_path}"
)]
elif name == "single_image_to_pdf":
image_path = arguments["image_path"]
output_path = arguments.get("output_path", "") or None
page_size = arguments.get("page_size", "A4")
result_path = PDFConverter.single_image_to_pdf(image_path, output_path, page_size)
return [TextContent(
type="text",
text=f"成功将图片转换为PDF!\n输出文件:{result_path}"
)]
else:
return [TextContent(
type="text",
text=f"未知的工具: {name}"
)]
except Exception as e:
return [TextContent(
type="text",
text=f"执行工具 {name} 时发生错误: {str(e)}"
)]
async def main():
"""主函数"""
# 运行服务器
from mcp.server.stdio import stdio_server
async with stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="undoom-pdf-mcp",
server_version="0.2.3",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
if __name__ == "__main__":
asyncio.run(main())
|
2301_80863610/undoom_pdf_mcp
|
undoom_pdf_mcp/main.py
|
Python
|
mit
| 29,544
|
"""抖音数据分析 MCP 服务器包"""
from .douyin_mcp_server import DouyinMCPServer, main, cli_main
__version__ = "0.1.2"
__all__ = ["DouyinMCPServer", "main", "cli_main"]
|
2301_80863610/undoom-douyin-data-analysis
|
undoom_douyin_data_analysis/__init__.py
|
Python
|
mit
| 178
|
#!/usr/bin/env python3
"""
Douyin data-analysis MCP server

An MCP server version of the original Douyin content-analysis tool,
providing data collection, analysis, and export features.
"""
import asyncio
import json
import logging
import os
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib.parse import quote
import pandas as pd
from bs4 import BeautifulSoup
from collections import Counter
import jieba
import traceback
try:
from DrissionPage import ChromiumPage
from DrissionPage.errors import ElementNotFoundError
DRISSION_AVAILABLE = True
except ImportError:
DRISSION_AVAILABLE = False
logging.warning("DrissionPage not available. Some features may be limited.")
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import (
Resource,
Tool,
TextContent,
ImageContent,
EmbeddedResource,
LoggingLevel
)
import mcp.types as types
import mcp.server.stdio
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("douyin-mcp")
class DouyinMCPServer:
"""抖音数据分析 MCP 服务器"""
def __init__(self):
self.server = Server("douyin-analyzer")
self.collected_data = {
'videos': [],
'users': []
}
self.page = None
self.is_running = False
# 设置工具
self._setup_tools()
# 设置资源
self._setup_resources()
# 设置处理器
self._setup_handlers()
def _setup_tools(self):
"""设置可用工具"""
@self.server.list_tools()
async def handle_list_tools() -> list[Tool]:
"""列出所有可用工具"""
return [
Tool(
name="search_douyin_videos",
description="搜索抖音视频数据",
inputSchema={
"type": "object",
"properties": {
"keyword": {
"type": "string",
"description": "搜索关键词"
},
"scroll_count": {
"type": "integer",
"description": "滚动次数,默认为10",
"default": 10
},
"delay": {
"type": "number",
"description": "每次滚动的延迟时间(秒),默认为2",
"default": 2.0
}
},
"required": ["keyword"]
}
),
Tool(
name="search_douyin_users",
description="搜索抖音用户数据",
inputSchema={
"type": "object",
"properties": {
"keyword": {
"type": "string",
"description": "搜索关键词"
},
"scroll_count": {
"type": "integer",
"description": "滚动次数,默认为10",
"default": 10
},
"delay": {
"type": "number",
"description": "每次滚动的延迟时间(秒),默认为2",
"default": 2.0
}
},
"required": ["keyword"]
}
),
Tool(
name="analyze_interaction_data",
description="分析视频互动数据(点赞、评论等)",
inputSchema={
"type": "object",
"properties": {},
"required": []
}
),
Tool(
name="analyze_content_length",
description="分析视频标题长度分布",
inputSchema={
"type": "object",
"properties": {},
"required": []
}
),
Tool(
name="analyze_keywords",
description="分析视频标题中的高频词汇",
inputSchema={
"type": "object",
"properties": {
"top_n": {
"type": "integer",
"description": "返回前N个高频词汇,默认为50",
"default": 50
}
},
"required": []
}
),
Tool(
name="export_data",
description="导出采集的数据",
inputSchema={
"type": "object",
"properties": {
"format": {
"type": "string",
"enum": ["json", "excel", "csv"],
"description": "导出格式",
"default": "json"
},
"data_type": {
"type": "string",
"enum": ["videos", "users", "all"],
"description": "导出数据类型",
"default": "videos"
},
"filename": {
"type": "string",
"description": "文件名(不包含扩展名)",
"default": "douyin_data"
}
},
"required": []
}
),
Tool(
name="get_data_summary",
description="获取当前采集数据的摘要信息",
inputSchema={
"type": "object",
"properties": {},
"required": []
}
),
Tool(
name="clear_data",
description="清空当前采集的数据",
inputSchema={
"type": "object",
"properties": {
"data_type": {
"type": "string",
"enum": ["videos", "users", "all"],
"description": "要清空的数据类型",
"default": "all"
}
},
"required": []
}
)
]
@self.server.call_tool()
async def handle_call_tool(name: str, arguments: dict) -> list[types.TextContent]:
"""处理工具调用"""
try:
if name == "search_douyin_videos":
return await self._search_douyin_videos(**arguments)
elif name == "search_douyin_users":
return await self._search_douyin_users(**arguments)
elif name == "analyze_interaction_data":
return await self._analyze_interaction_data()
elif name == "analyze_content_length":
return await self._analyze_content_length()
elif name == "analyze_keywords":
return await self._analyze_keywords(**arguments)
elif name == "export_data":
return await self._export_data(**arguments)
elif name == "get_data_summary":
return await self._get_data_summary()
elif name == "clear_data":
return await self._clear_data(**arguments)
else:
raise ValueError(f"Unknown tool: {name}")
except Exception as e:
logger.error(f"Tool {name} failed: {e}")
return [types.TextContent(type="text", text=f"错误: {str(e)}")]
def _setup_resources(self):
"""设置资源"""
@self.server.list_resources()
async def handle_list_resources() -> list[Resource]:
"""列出可用资源"""
return [
Resource(
uri="douyin://data/videos",
name="视频数据",
description="当前采集的视频数据",
mimeType="application/json"
),
Resource(
uri="douyin://data/users",
name="用户数据",
description="当前采集的用户数据",
mimeType="application/json"
),
Resource(
uri="douyin://analysis/summary",
name="数据摘要",
description="数据采集和分析摘要",
mimeType="text/plain"
)
]
@self.server.read_resource()
async def handle_read_resource(uri: str) -> str:
"""读取资源内容"""
if uri == "douyin://data/videos":
return json.dumps(self.collected_data, ensure_ascii=False, indent=2)
elif uri == "douyin://data/users":
return json.dumps(self.user_data, ensure_ascii=False, indent=2)
elif uri == "douyin://analysis/summary":
return self._generate_summary()
else:
raise ValueError(f"Unknown resource: {uri}")
def _setup_handlers(self):
"""设置其他处理器"""
pass
async def _search_douyin_videos(self, keyword: str, scroll_count: int = 10, delay: float = 2.0) -> list[types.TextContent]:
"""搜索抖音视频"""
if not DRISSION_AVAILABLE:
return [types.TextContent(type="text", text="错误: DrissionPage 未安装,无法进行数据采集")]
try:
# 初始化浏览器
if not await self._init_browser():
return [types.TextContent(type="text", text="错误: 浏览器初始化失败")]
# 构建搜索URL
search_url = f"https://www.douyin.com/search/{quote(keyword)}?source=normal_search&type=video"
logger.info(f"搜索视频: {search_url}")
# 访问页面
self.page.get(search_url)
await asyncio.sleep(5) # 等待页面加载
# 开始滚动采集
new_data = await self._scroll_and_collect(scroll_count, delay, 'video')
# 添加到已采集数据
for data in new_data:
if data not in self.collected_data['videos']:
self.collected_data['videos'].append(data)
result_text = f"成功采集到 {len(new_data)} 条视频数据\n"
result_text += f"当前总共有 {len(self.collected_data['videos'])} 条视频数据\n\n"
# 显示前5条数据作为预览
if new_data:
result_text += "最新采集的数据预览:\n"
for i, data in enumerate(new_data[:5]):
result_text += f"{i+1}. {data.get('title', 'N/A')} - {data.get('author', 'N/A')} - {data.get('likes', 'N/A')}赞\n"
return [types.TextContent(type="text", text=result_text)]
except Exception as e:
logger.error(f"搜索视频失败: {e}")
return [types.TextContent(type="text", text=f"搜索失败: {str(e)}")]
finally:
await self._cleanup_browser()
async def _search_douyin_users(self, keyword: str, scroll_count: int = 10, delay: float = 2.0) -> list[types.TextContent]:
"""搜索抖音用户"""
if not DRISSION_AVAILABLE:
return [types.TextContent(type="text", text="错误: DrissionPage 未安装,无法进行数据采集")]
try:
# 初始化浏览器
if not await self._init_browser():
return [types.TextContent(type="text", text="错误: 浏览器初始化失败")]
# 构建搜索URL
search_url = f"https://www.douyin.com/search/{quote(keyword)}?source=normal_search&type=user"
logger.info(f"搜索用户: {search_url}")
# 访问页面
self.page.get(search_url)
await asyncio.sleep(5) # 等待页面加载
# 开始滚动采集
new_data = await self._scroll_and_collect(scroll_count, delay, 'user')
# 添加到已采集数据
for data in new_data:
if data not in self.collected_data['users']:
self.collected_data['users'].append(data)
result_text = f"成功采集到 {len(new_data)} 条用户数据\n"
result_text += f"当前总共有 {len(self.collected_data['users'])} 条用户数据\n\n"
# 显示前5条数据作为预览
if new_data:
result_text += "最新采集的用户数据预览:\n"
for i, data in enumerate(new_data[:5]):
result_text += f"{i+1}. {data.get('title', 'N/A')} - {data.get('douyin_id', 'N/A')} - {data.get('followers', 'N/A')}粉丝\n"
return [types.TextContent(type="text", text=result_text)]
except Exception as e:
logger.error(f"搜索用户失败: {e}")
return [types.TextContent(type="text", text=f"搜索失败: {str(e)}")]
finally:
await self._cleanup_browser()
async def _init_browser(self) -> bool:
"""初始化浏览器"""
try:
if self.page is None:
self.page = ChromiumPage()
await asyncio.sleep(2) # 等待浏览器启动
return True
except Exception as e:
logger.error(f"浏览器初始化失败: {e}")
return False
async def _cleanup_browser(self):
"""清理浏览器资源"""
try:
if self.page:
self.page.quit()
self.page = None
except Exception as e:
logger.error(f"清理浏览器失败: {e}")
async def _scroll_and_collect(self, scroll_count: int, delay: float, data_type: str) -> List[Dict]:
"""滚动页面并收集数据"""
collected = []
try:
last_height = self.page.run_js("return document.body.scrollHeight")
for i in range(scroll_count):
# 滚动页面
self.page.run_js("window.scrollTo(0, document.body.scrollHeight)")
await asyncio.sleep(delay)
# 检查是否到达底部
new_height = self.page.run_js("return document.body.scrollHeight")
if new_height == last_height:
logger.info("已到达页面底部")
break
last_height = new_height
# 获取页面源码并解析
page_source = self.page.html
soup = BeautifulSoup(page_source, 'html.parser')
# 根据数据类型选择不同的提取方法
if data_type == 'user':
new_data = self._extract_user_data(soup)
else:
# 直接传递整个soup对象给视频提取方法
new_data = self._extract_video_items(soup)
logger.info(f"本次滚动提取到 {len(new_data)} 条视频数据")
# 添加新数据(去重)
for data in new_data:
if data not in collected:
collected.append(data)
logger.info(f"滚动 {i+1}/{scroll_count},当前采集 {len(collected)} 条数据")
except Exception as e:
logger.error(f"滚动采集失败: {e}")
return collected
def _extract_video_items(self, html) -> List[Dict]:
"""提取视频数据"""
video_data = []
try:
# 查找视频项目 - 更新为新的页面结构
video_items = html.select('li.SwZLHMKk')
logger.info(f"找到 {len(video_items)} 个视频项目")
for item in video_items:
try:
data = self._extract_basic_info(item)
self._extract_stats_info(item, data)
self._extract_description(item, data)
# 清理和格式化数据
data = self._clean_and_format_data(data)
if data['title']: # 只添加有标题的数据
video_data.append(data)
except Exception as e:
logger.error(f"提取单个视频数据失败: {e}")
continue
except Exception as e:
logger.error(f"提取视频数据失败: {e}")
return video_data
def _extract_basic_info(self, item) -> Dict:
"""提取基本信息"""
data = {
'title': '',
'author': '',
'video_link': '',
'publish_time': '',
'likes': '0',
'comments': '0',
'shares': '0'
}
try:
# 提取标题 - 新的选择器
title_elem = item.select_one('div.VDYK8Xd7')
if title_elem:
data['title'] = title_elem.get_text(strip=True)
# 提取作者 - 新的选择器
author_elem = item.select_one('span.MZNczJmS')
if author_elem:
data['author'] = author_elem.get_text(strip=True)
# 提取视频链接 - 新的选择器
link_elem = item.select_one('a.hY8lWHgA')
if link_elem:
href = link_elem.get('href', '')
if href.startswith('//'):
data['video_link'] = 'https:' + href
else:
data['video_link'] = href
# 提取发布时间
time_elem = item.select_one('span.faDtinfi')
if time_elem:
data['publish_time'] = time_elem.get_text(strip=True)
except Exception as e:
logger.error(f"提取基本信息失败: {e}")
return data
def _extract_stats_info(self, item, data: Dict):
"""提取统计信息"""
try:
# 查找点赞数 - 新的选择器
likes_elem = item.select_one('span.cIiU4Muu')
if likes_elem:
likes_text = likes_elem.get_text(strip=True)
data['likes'] = likes_text
# 暂时无法找到评论和分享数的具体选择器,保持默认值
# 如果需要,可以进一步分析页面结构
except Exception as e:
logger.error(f"提取统计信息失败: {e}")
def _extract_description(self, item, data: Dict):
"""提取描述信息"""
try:
# 尝试从标题元素中获取描述,或者查找其他可能的描述元素
desc_elem = item.select_one('div.VDYK8Xd7')
if desc_elem:
# 如果标题元素包含描述信息,使用它
data['description'] = desc_elem.get_text(strip=True)
else:
# 否则保持为空
data['description'] = ''
except Exception as e:
logger.error(f"提取描述信息失败: {e}")
def _clean_and_format_data(self, data: Dict) -> Dict:
"""清理和格式化数据"""
try:
# 清理文本
for key in ['title', 'author', 'description']:
if key in data:
data[key] = self._clean_text(data[key])
# 格式化数字
for key in ['likes', 'comments', 'shares']:
if key in data:
data[key] = self._format_number(data[key])
# 添加采集时间
data['collected_at'] = datetime.now().isoformat()
except Exception as e:
logger.error(f"清理格式化数据失败: {e}")
return data
def _extract_user_data(self, html) -> List[Dict]:
"""提取用户数据"""
user_data = []
try:
# 查找用户项目
user_items = html.select("div.search-result-card > a.hY8lWHgA.poLTDMYS")
for item in user_items:
try:
# 获取用户链接
user_link = item.get('href', '')
# 获取标题
title_elem = item.select_one('div.XQwChAbX p.v9LWb7QE span span span span span')
title = title_elem.get_text(strip=True) if title_elem else ''
# 获取头像URL
avatar_elem = item.select_one('img.RlLOO79h')
avatar_url = avatar_elem.get('src', '') if avatar_elem else ''
# 获取统计数据
stats_div = item.select_one('div.jjebLXt0')
douyin_id = ''
likes = '0'
followers = '0'
if stats_div:
spans = stats_div.select('span')
for span in spans:
text = span.get_text(strip=True)
if '抖音号:' in text or '抖音号:' in text:
id_span = span.select_one('span')
if id_span:
douyin_id = id_span.get_text(strip=True)
elif '获赞' in text:
likes = text.replace('获赞', '').strip()
elif '粉丝' in text:
followers = text.replace('粉丝', '').strip()
# 获取简介
desc_elem = item.select_one('p.Kdb5Km3i span span span span span')
description = desc_elem.get_text(strip=True) if desc_elem else ''
# 构建数据
data = {
'title': title,
'douyin_id': douyin_id,
'likes': likes,
'followers': followers,
'description': description,
'avatar_url': avatar_url,
'user_link': user_link,
'collected_at': datetime.now().isoformat()
}
if data['title']: # 只添加有标题的数据
user_data.append(data)
except Exception as e:
logger.error(f"提取单个用户数据失败: {e}")
continue
except Exception as e:
logger.error(f"提取用户数据失败: {e}")
return user_data
def _clean_text(self, text: str) -> str:
"""清理文本"""
if not text:
return ""
return text.strip().replace('\n', ' ').replace('\r', ' ')
def _format_number(self, num_str: str) -> str:
"""格式化数字字符串"""
if not num_str:
return "0"
# 移除非数字字符,保留万、千等单位
import re
cleaned = re.sub(r'[^0-9万千.]+', '', str(num_str))
return cleaned if cleaned else "0"
async def _analyze_interaction_data(self) -> list[types.TextContent]:
"""分析互动数据"""
if not self.collected_data['videos']:
return [types.TextContent(type="text", text="没有可分析的视频数据")]
try:
# 将点赞数转换为数字
likes_data = []
for data in self.collected_data['videos']:
likes = str(data.get('likes', '0'))
try:
if '万' in likes:
num = float(likes.replace('万', '')) * 10000
likes_data.append(int(num))
else:
likes_data.append(int(likes))
except (ValueError, TypeError):
continue
if not likes_data:
return [types.TextContent(type="text", text="没有有效的点赞数据可分析")]
# 计算统计数据
total_likes = sum(likes_data)
avg_likes = total_likes / len(likes_data)
max_likes = max(likes_data)
min_likes = min(likes_data)
# 生成报告
report = "===== 互动数据分析报告 =====\n\n"
report += f"总视频数: {len(self.collected_data['videos'])}\n"
report += f"总点赞数: {self._format_large_number(total_likes)}\n"
report += f"平均点赞数: {self._format_large_number(int(avg_likes))}\n"
report += f"最高点赞数: {self._format_large_number(max_likes)}\n"
report += f"最低点赞数: {self._format_large_number(min_likes)}\n\n"
# 点赞数分布
ranges = [(0, 100), (101, 1000), (1001, 10000), (10001, 100000), (100001, float('inf'))]
report += "点赞数分布:\n"
for start, end in ranges:
count = sum(1 for likes in likes_data if start <= likes <= end)
range_text = f"{start}-{end}" if end != float('inf') else f"{start}+"
percentage = (count / len(likes_data)) * 100
report += f"{range_text}: {count}个 ({percentage:.1f}%)\n"
return [types.TextContent(type="text", text=report)]
except Exception as e:
logger.error(f"分析互动数据失败: {e}")
return [types.TextContent(type="text", text=f"分析失败: {str(e)}")]
async def _analyze_content_length(self) -> list[types.TextContent]:
"""分析内容长度"""
if not self.collected_data['videos']:
return [types.TextContent(type="text", text="没有可分析的视频数据")]
try:
# 计算标题长度
title_lengths = [len(data.get('title', '')) for data in self.collected_data['videos']]
title_lengths = [length for length in title_lengths if length > 0]
if not title_lengths:
return [types.TextContent(type="text", text="没有有效的标题数据可分析")]
# 计算统计数据
avg_length = sum(title_lengths) / len(title_lengths)
max_length = max(title_lengths)
min_length = min(title_lengths)
# 生成报告
report = "===== 内容长度分析报告 =====\n\n"
report += f"平均标题长度: {avg_length:.1f}字\n"
report += f"最长标题: {max_length}字\n"
report += f"最短标题: {min_length}字\n\n"
# 添加长度分布统计
length_ranges = [(0, 10), (11, 20), (21, 30), (31, 50), (51, 100), (101, float('inf'))]
report += "标题长度分布:\n"
for start, end in length_ranges:
count = sum(1 for length in title_lengths if start <= length <= end)
range_text = f"{start}-{end}字" if end != float('inf') else f"{start}字以上"
percentage = (count / len(title_lengths)) * 100
report += f"{range_text}: {count}个 ({percentage:.1f}%)\n"
return [types.TextContent(type="text", text=report)]
except Exception as e:
logger.error(f"分析内容长度失败: {e}")
return [types.TextContent(type="text", text=f"分析失败: {str(e)}")]
async def _analyze_keywords(self, top_n: int = 50) -> list[types.TextContent]:
"""分析高频词汇"""
if not self.collected_data['videos']:
return [types.TextContent(type="text", text="没有可分析的视频数据")]
try:
# 合并所有标题文本
all_titles = ' '.join(data.get('title', '') for data in self.collected_data['videos'])
if not all_titles.strip():
return [types.TextContent(type="text", text="没有有效的标题文本可分析")]
# 设置停用词
stop_words = {
'的', '了', '是', '在', '我', '有', '和', '就',
'都', '而', '及', '与', '着', '或', '等', '为',
'一个', '没有', '这个', '那个', '但是', '而且',
'只是', '不过', '这样', '一样', '一直', '一些',
'这', '那', '也', '你', '我们', '他们', '它们',
'把', '被', '让', '向', '往', '但', '去', '又',
                '能', '好', '给', '到', '看', '想', '要', '会',
                '多', '这些', '那些', '什么', '怎么', '如何',
                '为什么', '可以', '因为', '所以', '应该', '可能'
}
# 使用jieba进行分词
words = []
for word in jieba.cut(all_titles):
if len(word) > 1 and word not in stop_words:
words.append(word)
if not words:
return [types.TextContent(type="text", text="分词后没有有效词汇")]
# 统计词频
word_counts = Counter(words)
# 生成报告
report = "===== 高频词汇分析报告 =====\n\n"
report += f"总标题数: {len(self.collected_data['videos'])}\n"
report += f"总词汇量: {len(words)}\n"
report += f"不同词汇数: {len(word_counts)}\n\n"
# 显示高频词汇
report += f"高频词汇 TOP {top_n}:\n"
report += "-" * 40 + "\n"
report += "排名\t词汇\t\t出现次数\t频率\n"
report += "-" * 40 + "\n"
for rank, (word, count) in enumerate(word_counts.most_common(top_n), 1):
frequency = (count / len(words)) * 100
report += f"{rank}\t{word}\t\t{count}\t\t{frequency:.2f}%\n"
return [types.TextContent(type="text", text=report)]
except Exception as e:
logger.error(f"分析高频词汇失败: {e}")
return [types.TextContent(type="text", text=f"分析失败: {str(e)}")]
def _format_large_number(self, num: int) -> str:
"""格式化大数字显示"""
if num >= 10000:
return f"{num/10000:.1f}万"
return str(num)
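    # Illustrative behaviour (values derived by hand):
    #     _format_large_number(9500)    ->  "9500"
    #     _format_large_number(123456)  ->  "12.3万"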
async def _export_data(self, format: str = "json", data_type: str = "videos", filename: str = "douyin_data") -> list[types.TextContent]:
"""导出数据"""
try:
# 选择要导出的数据
if data_type == "users":
data_to_export = self.collected_data['users']
if not data_to_export:
return [types.TextContent(type="text", text="没有用户数据可导出")]
elif data_type == "videos":
data_to_export = self.collected_data['videos']
if not data_to_export:
return [types.TextContent(type="text", text="没有视频数据可导出")]
elif data_type == "all":
if not self.collected_data['videos'] and not self.collected_data['users']:
return [types.TextContent(type="text", text="没有数据可导出")]
data_to_export = {
"videos": self.collected_data['videos'],
"users": self.collected_data['users']
}
else:
return [types.TextContent(type="text", text="无效的数据类型")]
# 生成文件名(使用绝对路径)
import os as os_module
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
file_extension = "xlsx" if format == "excel" else format
base_dir = os_module.path.dirname(os_module.path.abspath(__file__))
full_filename = os_module.path.join(base_dir, f"{filename}_{timestamp}.{file_extension}")
# 根据格式导出
if format == "json":
with open(full_filename, 'w', encoding='utf-8') as f:
json.dump(data_to_export, f, ensure_ascii=False, indent=2)
elif format == "excel":
# 尝试使用可用的Excel引擎
excel_engine = None
for engine in ['openpyxl', 'xlsxwriter']:
try:
# 测试引擎是否可用
test_df = pd.DataFrame({'test': [1]})
test_filename = f"test_engine_{engine}.xlsx"
test_df.to_excel(test_filename, engine=engine, index=False)
os_module.remove(test_filename) # 清理测试文件
excel_engine = engine
break
except Exception:
continue
if not excel_engine:
return [types.TextContent(type="text", text="错误: 没有可用的Excel引擎,请安装 openpyxl 或 xlsxwriter")]
if data_type == "all":
with pd.ExcelWriter(full_filename, engine=excel_engine) as writer:
if self.collected_data['videos']:
pd.DataFrame(self.collected_data['videos']).to_excel(writer, sheet_name='Videos', index=False)
if self.collected_data['users']:
pd.DataFrame(self.collected_data['users']).to_excel(writer, sheet_name='Users', index=False)
else:
pd.DataFrame(data_to_export).to_excel(full_filename, index=False, engine=excel_engine)
elif format == "csv":
if data_type == "all":
# CSV格式不支持多表,分别导出
if self.collected_data['videos']:
video_filename = f"{filename}_videos_{timestamp}.csv"
pd.DataFrame(self.collected_data['videos']).to_csv(video_filename, index=False, encoding='utf-8-sig')
if self.collected_data['users']:
user_filename = f"{filename}_users_{timestamp}.csv"
pd.DataFrame(self.collected_data['users']).to_csv(user_filename, index=False, encoding='utf-8-sig')
return [types.TextContent(type="text", text=f"数据已导出为CSV格式\n视频数据: {video_filename if self.collected_data['videos'] else '无'}\n用户数据: {user_filename if self.collected_data['users'] else '无'}")]
else:
pd.DataFrame(data_to_export).to_csv(full_filename, index=False, encoding='utf-8-sig')
return [types.TextContent(type="text", text=f"数据已成功导出到: {full_filename}")]
except Exception as e:
logger.error(f"导出数据失败: {e}")
logger.error(f"详细错误信息: {traceback.format_exc()}")
return [types.TextContent(type="text", text=f"导出失败: {str(e)}\n详细错误: {traceback.format_exc()}")]
async def _get_data_summary(self) -> list[types.TextContent]:
"""获取数据摘要"""
summary = self._generate_summary()
return [types.TextContent(type="text", text=summary)]
def _generate_summary(self) -> str:
"""生成数据摘要"""
summary = "===== 抖音数据采集摘要 =====\n\n"
# 视频数据摘要
summary += f"视频数据: {len(self.collected_data['videos'])} 条\n"
if self.collected_data['videos']:
total_likes = 0
for data in self.collected_data['videos']:
likes = str(data.get('likes', '0'))
try:
if '万' in likes:
num = float(likes.replace('万', '')) * 10000
total_likes += int(num)
else:
total_likes += int(likes)
except (ValueError, TypeError):
continue
summary += f"总点赞数: {self._format_large_number(total_likes)}\n"
summary += f"平均点赞数: {self._format_large_number(int(total_likes / len(self.collected_data['videos'])) if self.collected_data['videos'] else 0)}\n"
summary += "\n"
# 用户数据摘要
summary += f"用户数据: {len(self.collected_data['users'])} 条\n"
if self.collected_data['users']:
total_followers = 0
for data in self.collected_data['users']:
followers = str(data.get('followers', '0'))
try:
if '万' in followers:
num = float(followers.replace('万', '')) * 10000
total_followers += int(num)
else:
total_followers += int(followers)
except (ValueError, TypeError):
continue
summary += f"总粉丝数: {self._format_large_number(total_followers)}\n"
summary += f"平均粉丝数: {self._format_large_number(int(total_followers / len(self.collected_data['users'])) if self.collected_data['users'] else 0)}\n"
summary += "\n"
summary += f"最后更新时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"
return summary
async def _clear_data(self, data_type: str = "all") -> list[types.TextContent]:
"""清空数据"""
try:
if data_type == "videos":
count = len(self.collected_data['videos'])
self.collected_data['videos'].clear()
return [types.TextContent(type="text", text=f"已清空 {count} 条视频数据")]
elif data_type == "users":
count = len(self.collected_data['users'])
self.collected_data['users'].clear()
return [types.TextContent(type="text", text=f"已清空 {count} 条用户数据")]
elif data_type == "all":
video_count = len(self.collected_data['videos'])
user_count = len(self.collected_data['users'])
self.collected_data['videos'].clear()
self.collected_data['users'].clear()
return [types.TextContent(type="text", text=f"已清空所有数据\n视频数据: {video_count} 条\n用户数据: {user_count} 条")]
else:
return [types.TextContent(type="text", text="无效的数据类型")]
except Exception as e:
logger.error(f"清空数据失败: {e}")
return [types.TextContent(type="text", text=f"清空失败: {str(e)}")]
async def main():
"""主函数"""
server = DouyinMCPServer()
# 运行服务器
async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
await server.server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="douyin-analyzer",
server_version="1.0.0",
capabilities=server.server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
def cli_main():
"""命令行入口点"""
asyncio.run(main())
if __name__ == "__main__":
cli_main()
|
2301_80863610/undoom-douyin-data-analysis
|
undoom_douyin_data_analysis/douyin_mcp_server.py
|
Python
|
mit
| 41,756
|
<?php
// 获取客户端IP地址
function get_client_ip() {
$ipaddress = '';
// 首先检查 REMOTE_ADDR
if (isset($_SERVER['REMOTE_ADDR'])) {
$ipaddress = $_SERVER['REMOTE_ADDR'];
} elseif (isset($_SERVER['HTTP_CLIENT_IP'])) {
$ipaddress = $_SERVER['HTTP_CLIENT_IP'];
} elseif (isset($_SERVER['HTTP_X_FORWARDED_FOR'])) {
$ipaddress = $_SERVER['HTTP_X_FORWARDED_FOR'];
} elseif (isset($_SERVER['HTTP_X_FORWARDED'])) {
$ipaddress = $_SERVER['HTTP_X_FORWARDED'];
} elseif (isset($_SERVER['HTTP_FORWARDED_FOR'])) {
$ipaddress = $_SERVER['HTTP_FORWARDED_FOR'];
} elseif (isset($_SERVER['HTTP_FORWARDED'])) {
$ipaddress = $_SERVER['HTTP_FORWARDED'];
} else {
$ipaddress = 'UNKNOWN';
}
return $ipaddress;
}
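// Note (illustrative): with a direct connection REMOTE_ADDR already holds the
// client address; behind a reverse proxy REMOTE_ADDR is the proxy, and the real
// client may appear in HTTP_X_FORWARDED_FOR, e.g. "203.0.113.7, 198.51.100.2"
// (the left-most entry is the original client; this header is client-supplied
// and therefore spoofable).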
// 存储 POST 和 GET 数据的函数
function store_ip_data($basePath) {
$ip = get_client_ip();
$filePath = $basePath. '/ip_value/'. $ip. '.txt'; // 将文件存储在 ip_value 目录中
$checkFilePath = $basePath. '/ip_check/'. $ip. '.txt'; // 存储检查结果的文件路径
// 合并 $_GET 和 $_POST 数据
$combined = array_merge($_GET, $_POST);
// 检查存储数据的目录是否存在,若不存在则创建
$directory = $basePath. '/ip_value';
if (!is_dir($directory)) {
mkdir($directory, 0777, true);
}
// 检查 ip_check 目录是否存在,若不存在则创建
$checkDirectory = $basePath. '/ip_check';
if (!is_dir($checkDirectory)) {
mkdir($checkDirectory, 0777, true);
}
// 打开文件,如果文件不存在会创建
$file = fopen($filePath, 'a');
if ($file) {
foreach ($combined as $key => $value) {
// 将数据存储到文件中,每个数据后加换行符
fwrite($file, $key. ': '. $value. PHP_EOL);
}
fclose($file);
} else {
echo "无法打开存储数据的文件";
return;
}
// 检查 ip_check 中的文件内容是否为 2,如果是则拦截访问
    check_ip_status($ip, $basePath);
// 检查是否包含敏感字符
$isSensitive = false; // 标志位,用于标记是否包含敏感字符
$sensitiveChars = array('<', '"', "'", 'php', 'select', '(', ')', '[', ']', 'eval', '*', ':');
$ipValueFileContent = file_get_contents($filePath); // 直接读取 ip_value 对应 IP 文件的内容
foreach ($sensitiveChars as $char) {
        if (strpos($ipValueFileContent, $char) !== false) {
$isSensitive = true;
break;
}
}
    // 先读取检查文件的已有内容(必须在以 'w' 模式打开之前读取,否则内容会被清空);
    // 若内容为 2,拦截访问
    $content = '';
    if (file_exists($checkFilePath)) {
        $content = file_get_contents($checkFilePath);
        if (trim($content) == '2') {
            die("ip block!!!!");
        }
    }
    // 打开检查文件('w' 模式,文件不存在时会创建)
    $checkFile = fopen($checkFilePath, 'w');
    if ($checkFile) {
if ($isSensitive) {
fwrite($checkFile, '1'); // 若包含敏感字符,将检查文件内容写为 1
} else {
// 若文件内容已经为 1,则不修改为 0
if (trim($content) == '1') {
fwrite($checkFile, '1');
} else {
fwrite($checkFile, '0');
}
}
fclose($checkFile);
} else {
echo "无法打开检查文件";
}
// 再次检查 ip_check 中的文件内容是否为 2,如果是则拦截访问
    check_ip_status($ip, $basePath);
}
// 检查 IP 状态的函数
function check_ip_status($ip, $basePath) {
$checkFilePath = $basePath. '/ip_check/'. $ip. '.txt';
if (file_exists($checkFilePath)) {
$content = file_get_contents($checkFilePath);
if (trim($content) == '2') {
die("ip block!!!!");
}
}
}
// 调用存储数据的函数
$basePath = '/var/www/html/ip_span'; // 可修改为不同的路径
store_ip_data($basePath);
|
2301_80867077/ip_span
|
ip_span.php
|
PHP
|
unknown
| 4,228
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>IP Check Files</title>
<meta http-equiv="refresh" content="10"> <!-- 页面每 10 秒自动刷新 -->
</head>
<body>
<h1>IP Check Files with Content 1</h1>
<table border="1">
<thead>
<tr>
<th>IP Address</th>
<th>File Link</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<?php
$ip_check_dir = 'ip_check';
$ip_value_dir = 'ip_value';
if (is_dir($ip_check_dir)) {
$files = scandir($ip_check_dir);
foreach ($files as $file) {
                if ($file != '.' && $file != '..') {
$file_path = $ip_check_dir. '/'. $file;
try {
                        // file_get_contents 失败时返回 false(并产生警告),不会抛出异常
                        $content = file_get_contents($file_path);
                        if ($content !== false && trim($content) == '1') {
$ip = str_replace('.txt', '', $file);
$ip_value_file_path = $ip_value_dir. '/'. $ip. '.txt';
echo '<tr>';
echo '<td>'. $ip. '</td>';
echo '<td><a href="'. $ip_value_file_path. '">'. $ip. '</a></td>';
echo '<td><form action="" method="post"><input type="hidden" name="file" value="'. $file_path. '"><input type="submit" value="Block IP"></form></td>';
echo '</tr>';
}
} catch (Exception $e) {
echo '<tr><td colspan="3">Error reading '. $file. ': '. $e->getMessage(). '</td></tr>';
}
}
}
} else {
echo '<tr><td colspan="3">ip_check directory does not exist</td></tr>';
}
if ($_SERVER['REQUEST_METHOD'] == 'POST' && isset($_POST['file'])) {
$file_path = $_POST['file'];
try {
$file = fopen($file_path, 'w');
if ($file) {
fwrite($file, '2');
fclose($file);
// 刷新页面
echo '<script>window.location.reload();</script>';
} else {
echo 'Error opening file for writing';
}
} catch (Exception $e) {
echo 'Error updating file: '. $e->getMessage();
}
}
?>
</tbody>
</table>
</body>
</html>
|
2301_80867077/ip_span
|
start.php
|
PHP
|
unknown
| 2,482
|
"""Undoom Uninstaller MCP - A Windows program uninstaller MCP server."""
__version__ = "0.1.7"
__author__ = "Undoom"
__email__ = "kaikaihuhu666@163.com"
from .server import main, cli_main
__all__ = ["main", "cli_main"]
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/__init__.py
|
Python
|
mit
| 221
|
"""配置管理模块"""
import os
from typing import Any, Dict, List, Tuple
import winreg
# 注册表路径配置
REGISTRY_PATHS: List[Tuple[int, str]] = [
(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall"),
(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\WOW6432Node\Microsoft\Windows\CurrentVersion\Uninstall"),
(winreg.HKEY_CURRENT_USER, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall")
]
# 常见残留文件位置
COMMON_RESIDUE_LOCATIONS: List[str] = [
"APPDATA",
"LOCALAPPDATA",
"PROGRAMDATA"
]
# 文件大小单位
SIZE_UNITS: List[str] = ['B', 'KB', 'MB', 'GB', 'TB']
# 默认配置
DEFAULT_CONFIG: Dict[str, Any] = {
"max_programs_display": 100,
"default_sort_by": "name",
"include_stats": True,
"report_filename": "system_programs_report"
}
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/config.py
|
Python
|
mit
| 824
|
"""程序管理核心模块"""
import os
import winreg
import subprocess
from typing import Any, Dict, List, Optional, Tuple
from datetime import datetime
from .config import REGISTRY_PATHS
from .utils import (
get_directory_size,
format_size,
format_install_date,
get_drive_letter,
safe_remove_directory,
get_common_residue_paths
)
class ProgramInfo:
"""程序信息类"""
    def __init__(self, data: Dict[str, Any]):
self.name = data.get("name", "")
self.publisher = data.get("publisher", "")
self.version = data.get("version", "")
self.size = data.get("size", 0)
self.install_location = data.get("install_location", "")
self.uninstall_string = data.get("uninstall_string", "")
self.install_date = data.get("install_date", "未知")
self.drive_letter = data.get("drive_letter", "未知")
self.reg_key = data.get("reg_key", "")
self.hive = data.get("hive", winreg.HKEY_LOCAL_MACHINE)
    def to_dict(self) -> Dict[str, Any]:
"""转换为字典"""
return {
"name": self.name,
"publisher": self.publisher,
"version": self.version,
"size": self.size,
"install_location": self.install_location,
"uninstall_string": self.uninstall_string,
"install_date": self.install_date,
"drive_letter": self.drive_letter,
"reg_key": self.reg_key,
"hive": self.hive
}
class ProgramManager:
"""程序管理器"""
def __init__(self):
self.programs: List[ProgramInfo] = []
self.load_installed_programs()
def load_installed_programs(self) -> None:
"""从注册表加载已安装程序列表"""
self.programs = []
for hive, path in REGISTRY_PATHS:
self._load_from_registry_path(hive, path)
# 按名称排序
self.programs.sort(key=lambda x: x.name.lower())
def _load_from_registry_path(self, hive: int, path: str) -> None:
"""从指定注册表路径加载程序"""
try:
with winreg.OpenKey(hive, path) as key:
for i in range(0, winreg.QueryInfoKey(key)[0]):
try:
subkey_name = winreg.EnumKey(key, i)
program_info = self._extract_program_info(hive, path, subkey_name)
if program_info:
self.programs.append(program_info)
except (WindowsError, ValueError):
continue
except WindowsError:
pass
def _extract_program_info(self, hive: int, path: str, subkey_name: str) -> Optional[ProgramInfo]:
"""从注册表子键提取程序信息"""
try:
            with winreg.OpenKey(hive, path) as parent_key, winreg.OpenKey(parent_key, subkey_name) as subkey:
# 获取程序名称
try:
name = winreg.QueryValueEx(subkey, "DisplayName")[0]
if not name:
return None
                except OSError:
return None
# 获取其他信息
publisher = self._get_registry_value(subkey, "Publisher", "")
version = self._get_registry_value(subkey, "DisplayVersion", "")
install_location = self._get_registry_value(subkey, "InstallLocation", "")
uninstall_string = self._get_registry_value(subkey, "UninstallString", "")
# 处理安装日期
install_date_raw = self._get_registry_value(subkey, "InstallDate", "")
install_date = format_install_date(install_date_raw)
# 获取盘符
drive_letter = get_drive_letter(install_location)
# 获取程序大小
size = get_directory_size(install_location)
program_data = {
"name": name,
"publisher": publisher,
"version": version,
"size": size,
"install_location": install_location,
"uninstall_string": uninstall_string,
"install_date": install_date,
"drive_letter": drive_letter,
"reg_key": f"{path}\\{subkey_name}",
"hive": hive
}
return ProgramInfo(program_data)
except (WindowsError, ValueError):
return None
def _get_registry_value(self, key, value_name: str, default: str = "") -> str:
"""安全获取注册表值"""
try:
return winreg.QueryValueEx(key, value_name)[0] or default
        except OSError:
return default
def search_programs(self, query: str) -> List[ProgramInfo]:
"""搜索程序"""
query = query.lower()
results = []
for program in self.programs:
if (query in program.name.lower() or
query in program.publisher.lower()):
results.append(program)
return results
def get_program_by_name(self, name: str) -> Optional[ProgramInfo]:
"""根据名称获取程序"""
for program in self.programs:
if program.name == name:
return program
return None
def uninstall_program(self, program: ProgramInfo) -> Tuple[bool, str]:
"""卸载程序"""
if not program.uninstall_string:
return False, "该程序没有找到卸载命令!"
try:
# 运行卸载命令
if program.uninstall_string.lower().endswith(".msi"):
# MSI 包
cmd = f'msiexec /x "{program.uninstall_string}" /quiet'
else:
# 普通卸载程序
cmd = program.uninstall_string
subprocess.Popen(cmd, shell=True)
return True, f"正在卸载 {program.name}..."
except Exception as e:
return False, f"启动卸载程序失败: {str(e)}"
def force_remove_program(self, program: ProgramInfo) -> Tuple[bool, str]:
"""强制删除程序"""
errors = []
# 删除安装目录
if program.install_location and os.path.isdir(program.install_location):
success, message = safe_remove_directory(program.install_location)
if not success:
errors.append(f"删除安装目录失败: {message}")
# 删除注册表项
try:
key_parts = program.reg_key.split("\\")
parent_path = "\\".join(key_parts[:-1])
subkey_name = key_parts[-1]
with winreg.OpenKey(program.hive, parent_path, 0, winreg.KEY_ALL_ACCESS) as key:
winreg.DeleteKey(key, subkey_name)
except Exception as e:
errors.append(f"删除注册表项失败: {str(e)}")
if errors:
return False, "; ".join(errors)
else:
return True, f"{program.name} 已被强制删除!"
def clean_residues(self, program: ProgramInfo) -> Tuple[bool, str]:
"""清理程序残留"""
residue_paths = []
# 检查安装目录
if program.install_location and os.path.isdir(program.install_location):
residue_paths.append(program.install_location)
# 检查常见残留位置
common_paths = get_common_residue_paths(program.name)
for path in common_paths:
if os.path.exists(path):
residue_paths.append(path)
if not residue_paths:
return True, "该程序没有找到残留文件。"
# 删除残留文件
errors = []
deleted = []
for path in residue_paths:
success, message = safe_remove_directory(path)
if success:
deleted.append(path)
else:
errors.append(message)
if errors:
return False, "; ".join(errors)
else:
return True, f"已删除残留文件: {', '.join(deleted)}"
    def get_statistics(self) -> Dict[str, Any]:
"""获取统计信息"""
total_programs = len(self.programs)
# 按盘符统计
drive_stats = {}
for program in self.programs:
drive = program.drive_letter
drive_stats[drive] = drive_stats.get(drive, 0) + 1
# 按发布商统计
publisher_stats = {}
for program in self.programs:
publisher = program.publisher
if publisher and publisher != "未知":
publisher_stats[publisher] = publisher_stats.get(publisher, 0) + 1
return {
"total_programs": total_programs,
"drive_distribution": drive_stats,
"publisher_distribution": publisher_stats
}
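# A minimal usage sketch (illustrative; Windows only, since ProgramManager reads
# the registry via winreg). Run as a module so the relative imports resolve,
# e.g. `python -m undoom_uninstaller_mcp.program_manager`.
if __name__ == "__main__":
    manager = ProgramManager()
    stats = manager.get_statistics()
    print(f"已加载程序数: {stats['total_programs']}")
    # Show up to five matches for an arbitrary example query
    for program in manager.search_programs("microsoft")[:5]:
        print(program.name, program.version, program.drive_letter)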
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/program_manager.py
|
Python
|
mit
| 9,205
|
"""报告生成模块"""
import os
from typing import List, Dict
from datetime import datetime
from .program_manager import ProgramInfo
from .utils import format_size, escape_markdown, truncate_text
class ReportGenerator:
"""报告生成器"""
@staticmethod
def generate_markdown_table(programs: List[ProgramInfo], title: str = "系统已安装程序列表") -> str:
"""生成美化的Markdown格式程序列表"""
if not programs:
return f"# 📋 {title}\n\n> 暂无程序信息。"
# 美化的标题和概览
markdown = f"# 📋 {title}\n\n"
markdown += f"> 🔍 **发现 {len(programs)} 个已安装程序**\n\n"
# 快速统计概览
drive_stats = {}
publisher_stats = {}
total_size = 0
for program in programs:
drive = program.drive_letter
drive_stats[drive] = drive_stats.get(drive, 0) + 1
if program.publisher and program.publisher != "未知":
publisher_stats[program.publisher] = publisher_stats.get(program.publisher, 0) + 1
if program.size > 0:
total_size += program.size
# 统计信息卡片
markdown += "## 📊 快速概览\n\n"
markdown += "| 📈 统计项目 | 📊 数值 | 💾 详情 |\n"
markdown += "|------------|--------|--------|\n"
markdown += f"| **总程序数** | `{len(programs)}` | 系统中检测到的程序总数 |\n"
markdown += f"| **总占用空间** | `{format_size(total_size)}` | 已计算程序的总大小 |\n"
markdown += f"| **主要盘符** | `{max(drive_stats.items(), key=lambda x: x[1])[0] if drive_stats else '未知'} ({max(drive_stats.values()) if drive_stats else 0}个)` | 程序最多的安装盘符 |\n"
top_publisher = max(publisher_stats.items(), key=lambda x: x[1]) if publisher_stats else ("未知", 0)
markdown += f"| **主要发布商** | `{top_publisher[0][:20]}` | {top_publisher[1]}个程序 |\n\n"
# 盘符分布可视化
if drive_stats:
markdown += "### 💿 盘符分布\n\n"
for drive, count in sorted(drive_stats.items(), key=lambda x: x[1], reverse=True):
percentage = (count / len(programs)) * 100
bar_length = int(percentage / 5) # 每5%一个方块
bar = "█" * bar_length + "░" * (20 - bar_length)
markdown += f"**{drive}** `{count:3d}个` {bar} `{percentage:5.1f}%`\n\n"
# 程序详细列表
markdown += "## 📦 程序详细列表\n\n"
# 按类别分组显示
categories = {
"🛠️ 开发工具": [],
"🌐 浏览器与网络": [],
"🎮 游戏娱乐": [],
"🔧 系统组件": [],
"📱 移动开发": [],
"☁️ 云服务工具": [],
"📊 其他应用": []
}
for program in programs:
name_lower = program.name.lower()
publisher_lower = program.publisher.lower()
if any(keyword in name_lower for keyword in ['studio', 'ide', 'clion', 'intellij', 'git', 'jdk', 'java', 'cursor', 'codebuddy']):
categories["🛠️ 开发工具"].append(program)
elif any(keyword in name_lower for keyword in ['chrome', 'browser', 'firefox', 'edge']):
categories["🌐 浏览器与网络"].append(program)
elif any(keyword in name_lower for keyword in ['game', 'steam', 'epic', 'dead cells']):
categories["🎮 游戏娱乐"].append(program)
elif any(keyword in publisher_lower for keyword in ['microsoft', 'intel']) and 'visual studio' not in name_lower:
categories["🔧 系统组件"].append(program)
elif any(keyword in name_lower for keyword in ['android', 'maui', 'deveco', 'huawei']):
categories["📱 移动开发"].append(program)
elif any(keyword in name_lower for keyword in ['ai', 'copilot', 'figma', 'apifox', 'apipost']):
categories["☁️ 云服务工具"].append(program)
else:
categories["📊 其他应用"].append(program)
# 显示各类别
for category, progs in categories.items():
if progs:
markdown += f"### {category} ({len(progs)}个)\n\n"
markdown += "| 🏷️ 程序名称 | 🏢 发布商 | 📋 版本 | 💾 大小 | 📅 安装日期 | 💿 盘符 |\n"
markdown += "|------------|---------|-------|-------|----------|------|\n"
for program in progs:
name = escape_markdown(truncate_text(program.name, 25))
publisher = escape_markdown(truncate_text(program.publisher, 18))
version = escape_markdown(truncate_text(program.version, 12))
size = escape_markdown(format_size(program.size))
install_date = program.install_date if program.install_date != "未知" else "❓"
drive_letter = program.drive_letter
# 添加状态图标
size_icon = "💾" if program.size > 100*1024*1024 else "📦" if program.size > 0 else "❓"
markdown += f"| **{name}** | {publisher} | `{version}` | {size_icon} {size} | {install_date} | **{drive_letter}** |\n"
markdown += "\n"
return markdown
@staticmethod
def generate_enhanced_markdown_report(
programs: List[ProgramInfo],
filename: str = "system_programs_report",
include_stats: bool = True
) -> tuple[bool, str]:
"""生成增强版Markdown报告并保存到文件"""
if not programs:
return False, "没有程序信息可生成报告"
try:
# 生成报告内容
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
markdown = f"# 系统已安装程序详细报告\n\n"
markdown += f"**生成时间:** {current_time}\n\n"
markdown += f"**报告说明:** 本报告包含系统中所有已安装程序的详细信息\n\n"
# 基本统计信息
if include_stats:
markdown += ReportGenerator._generate_statistics_section(programs)
# 程序列表表格
markdown += "## 📋 程序详细列表\n\n"
markdown += "| 序号 | 程序名称 | 发布商 | 版本 | 大小 | 安装日期 | 盘符 | 安装位置 |\n"
markdown += "|------|----------|--------|------|------|----------|------|----------|\n"
# 表格内容
for i, program in enumerate(programs, 1):
name = escape_markdown(truncate_text(program.name, 35))
publisher = escape_markdown(truncate_text(program.publisher, 25))
version = escape_markdown(truncate_text(program.version, 20))
size = escape_markdown(format_size(program.size))
install_date = program.install_date
drive_letter = program.drive_letter
install_location = escape_markdown(truncate_text(program.install_location, 50))
markdown += f"| {i} | {name} | {publisher} | {version} | {size} | {install_date} | {drive_letter} | {install_location} |\n"
# 添加页脚信息
markdown += ReportGenerator._generate_footer_section()
# 保存到文件
output_path = os.path.join(os.getcwd(), f"{filename}.md")
with open(output_path, 'w', encoding='utf-8') as f:
f.write(markdown)
return True, f"报告已成功生成并保存到: {output_path}"
except Exception as e:
return False, f"生成报告失败: {str(e)}"
@staticmethod
def _generate_statistics_section(programs: List[ProgramInfo]) -> str:
"""生成统计信息部分"""
markdown = "## 📊 统计概览\n\n"
markdown += f"- **总程序数量:** {len(programs)}\n"
# 按盘符统计
drive_stats = {}
for program in programs:
drive = program.drive_letter
drive_stats[drive] = drive_stats.get(drive, 0) + 1
markdown += "- **按盘符分布:**\n"
for drive, count in sorted(drive_stats.items()):
percentage = (count / len(programs)) * 100
markdown += f" - {drive}: {count} 个程序 ({percentage:.1f}%)\n"
# 按发布商统计(前10名)
publisher_stats = {}
for program in programs:
publisher = program.publisher
if publisher and publisher != "未知":
publisher_stats[publisher] = publisher_stats.get(publisher, 0) + 1
top_publishers = sorted(publisher_stats.items(), key=lambda x: x[1], reverse=True)[:10]
if top_publishers:
markdown += "- **主要软件发布商(前10名):**\n"
for publisher, count in top_publishers:
markdown += f" - {publisher}: {count} 个程序\n"
markdown += "\n"
return markdown
@staticmethod
def _generate_footer_section() -> str:
"""生成页脚部分"""
markdown = "\n\n---\n\n"
markdown += "**报告生成工具:** undoom-uninstaller-mcp\n\n"
markdown += "**注意事项:**\n"
markdown += "- 本报告仅显示通过Windows注册表检测到的已安装程序\n"
markdown += "- 某些便携式软件或手动安装的程序可能不会出现在此列表中\n"
markdown += "- 程序大小信息可能不完全准确,仅供参考\n"
return markdown
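# A small smoke test with fabricated data (illustrative values only; requires
# Windows because ProgramInfo pulls a winreg default). Run as a module, e.g.
# `python -m undoom_uninstaller_mcp.report_generator`.
if __name__ == "__main__":
    sample = ProgramInfo({
        "name": "Demo App",
        "publisher": "Example Corp",
        "version": "1.0.0",
        "size": 42 * 1024 * 1024,  # 42 MB
        "install_location": r"C:\Program Files\DemoApp",
        "install_date": "2024-01-01",
        "drive_letter": "C:",
    })
    print(ReportGenerator.generate_markdown_table([sample], "示例程序列表"))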
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/report_generator.py
|
Python
|
mit
| 10,109
|
"""MCP服务器主模块 - 重构版本"""
import asyncio
import json
from typing import List
from mcp.server.models import InitializationOptions
from mcp.server import NotificationOptions, Server
from mcp.types import Tool, TextContent
import mcp.types as types
from .program_manager import ProgramManager, ProgramInfo
from .report_generator import ReportGenerator
from .config import DEFAULT_CONFIG
from .utils import format_size
# 创建服务器实例
server = Server("undoom-uninstaller-mcp")
# 创建程序管理器实例
program_manager = ProgramManager()
report_generator = ReportGenerator()
@server.list_tools()
async def handle_list_tools() -> List[Tool]:
"""列出可用的工具"""
return [
Tool(
name="list_programs",
description="列出所有已安装的程序",
inputSchema={
"type": "object",
"properties": {
"search": {
"type": "string",
"description": "搜索关键词(可选)"
}
}
}
),
Tool(
name="get_program_details",
description="获取指定程序的详细信息",
inputSchema={
"type": "object",
"properties": {
"program_name": {
"type": "string",
"description": "程序名称"
}
},
"required": ["program_name"]
}
),
Tool(
name="uninstall_program",
description="卸载指定程序",
inputSchema={
"type": "object",
"properties": {
"program_name": {
"type": "string",
"description": "要卸载的程序名称"
}
},
"required": ["program_name"]
}
),
Tool(
name="force_remove_program",
description="强制删除程序(删除文件和注册表项)",
inputSchema={
"type": "object",
"properties": {
"program_name": {
"type": "string",
"description": "要强制删除的程序名称"
}
},
"required": ["program_name"]
}
),
Tool(
name="clean_residues",
description="清理程序残留文件",
inputSchema={
"type": "object",
"properties": {
"program_name": {
"type": "string",
"description": "要清理残留的程序名称"
}
},
"required": ["program_name"]
}
),
Tool(
name="refresh_programs",
description="刷新程序列表",
inputSchema={
"type": "object",
"properties": {}
}
),
Tool(
name="show_all_programs_detailed",
description="显示所有程序的详细信息,包括名称、安装时间和盘符",
inputSchema={
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "限制返回的程序数量,默认为100",
"default": 100
},
"sort_by": {
"type": "string",
"description": "排序字段:name(名称)、install_date(安装时间)、drive_letter(盘符)",
"enum": ["name", "install_date", "drive_letter"],
"default": "name"
}
}
}
),
Tool(
name="generate_markdown_report",
description="生成系统程序信息的Markdown报告文件",
inputSchema={
"type": "object",
"properties": {
"filename": {
"type": "string",
"description": "输出文件名(不包含扩展名),默认为'system_programs_report'",
"default": "system_programs_report"
},
"limit": {
"type": "integer",
"description": "限制返回的程序数量,默认为200",
"default": 200
},
"sort_by": {
"type": "string",
"description": "排序字段:name(名称)、install_date(安装时间)、drive_letter(盘符)",
"enum": ["name", "install_date", "drive_letter"],
"default": "name"
},
"include_stats": {
"type": "boolean",
"description": "是否包含详细统计信息,默认为true",
"default": True
}
}
}
)
]
@server.call_tool()
async def handle_call_tool(name: str, arguments: dict | None) -> List[types.TextContent]:
"""处理工具调用"""
if arguments is None:
arguments = {}
try:
if name == "list_programs":
return await _handle_list_programs(arguments)
elif name == "get_program_details":
return await _handle_get_program_details(arguments)
elif name == "uninstall_program":
return await _handle_uninstall_program(arguments)
elif name == "force_remove_program":
return await _handle_force_remove_program(arguments)
elif name == "clean_residues":
return await _handle_clean_residues(arguments)
elif name == "refresh_programs":
return await _handle_refresh_programs(arguments)
elif name == "show_all_programs_detailed":
return await _handle_show_all_programs_detailed(arguments)
elif name == "generate_markdown_report":
return await _handle_generate_markdown_report(arguments)
else:
return [types.TextContent(type="text", text=f"错误:未知工具 '{name}'")]
except Exception as e:
return [types.TextContent(type="text", text=f"工具执行错误: {str(e)}")]
async def _handle_list_programs(arguments: dict) -> List[types.TextContent]:
"""处理列出程序请求"""
search_query = arguments.get("search", "")
if search_query:
programs = program_manager.search_programs(search_query)
title = f"搜索结果 - '{search_query}'"
else:
programs = program_manager.programs
title = "系统已安装程序列表"
# 限制返回数量
programs = programs[:50]
# 生成Markdown表格
markdown_table = report_generator.generate_markdown_table(programs, title)
return [types.TextContent(type="text", text=markdown_table)]
async def _handle_get_program_details(arguments: dict) -> List[types.TextContent]:
"""处理获取程序详情请求"""
program_name = arguments.get("program_name")
if not program_name:
return [types.TextContent(type="text", text="错误:请提供程序名称")]
program = program_manager.get_program_by_name(program_name)
if not program:
return [types.TextContent(type="text", text=f"错误:未找到程序 '{program_name}'")]
details = {
"name": program.name,
"publisher": program.publisher,
"version": program.version,
"size": format_size(program.size),
"install_location": program.install_location,
"install_date": program.install_date,
"drive_letter": program.drive_letter,
"uninstall_string": program.uninstall_string,
"reg_key": program.reg_key
}
return [types.TextContent(
type="text",
text=f"程序详细信息:\n" + json.dumps(details, ensure_ascii=False, indent=2)
)]
async def _handle_uninstall_program(arguments: dict) -> List[types.TextContent]:
"""处理卸载程序请求"""
program_name = arguments.get("program_name")
if not program_name:
return [types.TextContent(type="text", text="错误:请提供程序名称")]
program = program_manager.get_program_by_name(program_name)
if not program:
return [types.TextContent(type="text", text=f"错误:未找到程序 '{program_name}'")]
success, message = program_manager.uninstall_program(program)
return [types.TextContent(type="text", text=message)]
async def _handle_force_remove_program(arguments: dict) -> List[types.TextContent]:
"""处理强制删除程序请求"""
program_name = arguments.get("program_name")
if not program_name:
return [types.TextContent(type="text", text="错误:请提供程序名称")]
program = program_manager.get_program_by_name(program_name)
if not program:
return [types.TextContent(type="text", text=f"错误:未找到程序 '{program_name}'")]
success, message = program_manager.force_remove_program(program)
return [types.TextContent(type="text", text=message)]
async def _handle_clean_residues(arguments: dict) -> List[types.TextContent]:
"""处理清理残留请求"""
program_name = arguments.get("program_name")
if not program_name:
return [types.TextContent(type="text", text="错误:请提供程序名称")]
program = program_manager.get_program_by_name(program_name)
if not program:
return [types.TextContent(type="text", text=f"错误:未找到程序 '{program_name}'")]
success, message = program_manager.clean_residues(program)
return [types.TextContent(type="text", text=message)]
async def _handle_refresh_programs(arguments: dict) -> List[types.TextContent]:
"""处理刷新程序列表请求"""
program_manager.load_installed_programs()
return [types.TextContent(
type="text",
text=f"程序列表已刷新,共加载 {len(program_manager.programs)} 个程序"
)]
async def _handle_show_all_programs_detailed(arguments: dict) -> List[types.TextContent]:
"""处理显示详细程序列表请求"""
limit = arguments.get("limit", DEFAULT_CONFIG["max_programs_display"])
sort_by = arguments.get("sort_by", DEFAULT_CONFIG["default_sort_by"])
programs = program_manager.programs.copy()
# 排序
if sort_by == "name":
programs.sort(key=lambda x: x.name.lower())
elif sort_by == "install_date":
programs.sort(key=lambda x: x.install_date or "0000-00-00")
elif sort_by == "drive_letter":
programs.sort(key=lambda x: x.drive_letter)
# 限制数量
programs = programs[:limit]
# 生成标题
sort_names = {
"name": "程序名称",
"install_date": "安装日期",
"drive_letter": "盘符"
}
title = f"系统程序详细信息(按{sort_names.get(sort_by, sort_by)}排序)"
# 生成Markdown表格
markdown_table = report_generator.generate_markdown_table(programs, title)
return [types.TextContent(type="text", text=markdown_table)]
async def _handle_generate_markdown_report(arguments: dict) -> List[types.TextContent]:
"""处理生成Markdown报告请求"""
filename = arguments.get("filename", DEFAULT_CONFIG["report_filename"])
limit = arguments.get("limit", 200)
sort_by = arguments.get("sort_by", DEFAULT_CONFIG["default_sort_by"])
include_stats = arguments.get("include_stats", DEFAULT_CONFIG["include_stats"])
# 获取程序列表
programs = program_manager.programs.copy()
# 排序
if sort_by == "install_date":
programs.sort(key=lambda x: x.install_date or "")
elif sort_by == "drive_letter":
programs.sort(key=lambda x: x.drive_letter or "")
else: # 默认按名称排序
programs.sort(key=lambda x: x.name.lower())
# 限制数量
programs = programs[:limit]
# 生成报告
success, message = report_generator.generate_enhanced_markdown_report(programs, filename, include_stats)
return [types.TextContent(type="text", text=message)]
async def main():
"""主函数 - 运行MCP服务器"""
from mcp.server.stdio import stdio_server
async with stdio_server() as (read_stream, write_stream):
await server.run(
read_stream,
write_stream,
InitializationOptions(
server_name="undoom-uninstaller-mcp",
server_version="0.1.7",
capabilities=server.get_capabilities(
notification_options=NotificationOptions(),
experimental_capabilities={},
),
),
)
def cli_main():
"""CLI入口点"""
asyncio.run(main())
if __name__ == "__main__":
cli_main()
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/server.py
|
Python
|
mit
| 13,312
|
"""工具函数模块"""
import os
import shutil
from typing import Optional, Tuple, List
from datetime import datetime
def format_size(size: int) -> str:
"""格式化文件大小
Args:
size: 文件大小(字节)
Returns:
格式化后的大小字符串
"""
if size == 0:
return "N/A"
units = ['B', 'KB', 'MB', 'GB', 'TB']
for unit in units:
if size < 1024.0:
return f"{size:.1f} {unit}"
size /= 1024.0
return f"{size:.1f} TB"
def get_directory_size(path: str) -> int:
"""获取目录大小(优化版本,避免长时间阻塞)
Args:
path: 目录路径
Returns:
目录总大小(字节)
"""
if not path or not os.path.isdir(path):
return 0
total_size = 0
try:
# 限制扫描深度和文件数量,避免长时间阻塞
file_count = 0
max_files = 500 # 最多扫描500个文件
max_depth = 2 # 最大深度2层
for root, dirs, files in os.walk(path):
# 计算当前深度
depth = root[len(path):].count(os.sep)
if depth >= max_depth:
dirs[:] = [] # 不再深入子目录
continue
for filename in files:
if file_count >= max_files:
return total_size # 达到文件数量限制,返回当前大小
filepath = os.path.join(root, filename)
try:
total_size += os.path.getsize(filepath)
file_count += 1
except (OSError, IOError):
continue
except (OSError, IOError, KeyboardInterrupt):
pass
return total_size
def safe_remove_directory(path: str) -> Tuple[bool, str]:
"""安全删除目录
Args:
path: 目录路径
Returns:
(成功标志, 消息)
"""
if not path or not os.path.exists(path):
return True, "路径不存在"
try:
if os.path.isdir(path):
shutil.rmtree(path)
return True, f"成功删除目录: {path}"
else:
os.remove(path)
return True, f"成功删除文件: {path}"
except Exception as e:
return False, f"删除失败 {path}: {str(e)}"
def format_install_date(date_str: str) -> str:
"""格式化安装日期
Args:
date_str: 原始日期字符串(YYYYMMDD格式)
Returns:
格式化后的日期字符串(YYYY-MM-DD格式)
"""
if not date_str or len(date_str) != 8:
return "未知"
try:
return f"{date_str[:4]}-{date_str[4:6]}-{date_str[6:8]}"
except (ValueError, IndexError):
return "未知"
def get_drive_letter(path: str) -> str:
"""获取路径的盘符
Args:
path: 文件路径
Returns:
盘符字符串
"""
if not path:
return "未知"
if len(path) >= 2 and path[1] == ':':
return path[0].upper() + ":"
elif path.startswith("\\\\"): # UNC路径
return "网络路径"
else:
return "未知"
def escape_markdown(text: str) -> str:
"""转义Markdown特殊字符
Args:
text: 原始文本
Returns:
转义后的文本
"""
if not text:
return ""
return text.replace("|", "\\|")
def truncate_text(text: str, max_length: int) -> str:
"""截断文本
Args:
text: 原始文本
max_length: 最大长度
Returns:
截断后的文本
"""
if not text or len(text) <= max_length:
return text
return text[:max_length-3] + "..."
def get_common_residue_paths(program_name: str) -> List[str]:
"""获取程序常见残留路径
Args:
program_name: 程序名称
Returns:
可能的残留路径列表
"""
paths = []
env_vars = ["APPDATA", "LOCALAPPDATA", "PROGRAMDATA", "USERPROFILE"]
for env_var in env_vars:
base_path = os.environ.get(env_var, "")
if base_path:
if env_var == "USERPROFILE":
# 用户目录下的特殊路径
paths.extend([
os.path.join(base_path, "AppData", "Local", program_name),
os.path.join(base_path, "AppData", "Roaming", program_name)
])
else:
paths.append(os.path.join(base_path, program_name))
return paths
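# A quick self-check of the pure helpers above (runs on any OS; the expected
# values below were derived by hand from the function definitions).
if __name__ == "__main__":
    assert format_size(0) == "N/A"
    assert format_size(1536) == "1.5 KB"
    assert format_install_date("20240101") == "2024-01-01"
    assert format_install_date("bad") == "未知"
    assert get_drive_letter(r"C:\Program Files") == "C:"
    assert truncate_text("abcdefghij", 5) == "ab..."
    print("utils self-check passed")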
|
2301_80863610/undoom_Uninstaller_mcp
|
undoom_uninstaller_mcp/utils.py
|
Python
|
mit
| 4,662
|
#!/usr/bin/env python3
"""
本地MCP服务器测试入口
"""
import asyncio
import sys
import os
# 添加当前目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
def main():
"""本地测试入口点"""
try:
# 导入服务器模块
from system_monitor_mcp.server import main as async_main
# 运行异步主函数
asyncio.run(async_main())
except KeyboardInterrupt:
print("服务器已停止", file=sys.stderr)
sys.exit(0)
except Exception as e:
print(f"服务器启动失败: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
2301_80863610/system-monitor
|
main.py
|
Python
|
mit
| 675
|
#!/usr/bin/env python3
"""
MCP测试代码 - 基于JSON配置生成
"""
import asyncio
import sys
import os
# 添加当前目录到Python路径
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
def main():
"""MCP测试入口点"""
try:
# 导入服务器模块
from system_monitor_mcp.server import main as async_main
print("启动MCP服务器...")
# 运行异步主函数
asyncio.run(async_main())
except KeyboardInterrupt:
print("\n服务器已停止", file=sys.stderr)
sys.exit(0)
except Exception as e:
print(f"服务器启动失败: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
2301_80863610/system-monitor
|
mcp_test.py
|
Python
|
mit
| 723
|
#!/usr/bin/env python3
"""
System Monitor MCP Server - 包初始化文件
"""
__version__ = "1.1.2"
__author__ = "Undoom"
__description__ = "System Monitor MCP Server - 系统监控MCP服务器"
from .server import MCPServer
from .server import main
# 为了兼容性,也导出SystemMonitorMCP别名
SystemMonitorMCP = MCPServer
|
2301_80863610/system-monitor
|
system_monitor_mcp/__init__.py
|
Python
|
mit
| 333
|
#!/usr/bin/env python3
"""
System Monitor MCP Server - 包入口点
"""
import asyncio
import sys
import os
import argparse
def main():
"""Entry point for the package"""
parser = argparse.ArgumentParser(description='System Monitor MCP Server')
parser.add_argument('--version', action='version', version='system-monitor-mcp 1.1.4')
parser.add_argument('--help-mcp', action='store_true', help='显示MCP服务器帮助信息')
args = parser.parse_args()
if args.help_mcp:
print("System Monitor MCP Server")
print("提供系统监控功能的MCP服务器")
print("支持的工具: get_system_info, get_cpu_info, get_memory_info, get_disk_info, get_network_info, get_processes_info, monitor_resource")
return
try:
# 导入服务器模块
try:
from .server import main as async_main
except ImportError:
            # 如果相对导入失败,尝试绝对导入(sys、os 已在模块顶部导入)
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from system_monitor_mcp.server import main as async_main
# 运行异步主函数
asyncio.run(async_main())
except KeyboardInterrupt:
print("服务器已停止", file=sys.stderr)
sys.exit(0)
except Exception as e:
print(f"服务器启动失败: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
2301_80863610/system-monitor
|
system_monitor_mcp/__main__.py
|
Python
|
mit
| 1,503
|
#!/usr/bin/env python3
"""
System Monitor MCP Server - 数据收集模块
"""
import platform
import time
from typing import Dict, Any
import psutil
from .utils import format_uptime
async def collect_system_info() -> Dict[str, Any]:
"""收集系统基本信息"""
# CPU信息
cpu_freq = psutil.cpu_freq()
cpu_info = {
'physical_cores': psutil.cpu_count(logical=False),
'logical_cores': psutil.cpu_count(logical=True),
'current_freq': cpu_freq.current if cpu_freq else 0,
'max_freq': cpu_freq.max if cpu_freq else 0,
'min_freq': cpu_freq.min if cpu_freq else 0,
'usage': psutil.cpu_percent(interval=1)
}
# 内存信息
mem = psutil.virtual_memory()
memory_info = {
'total': mem.total,
'available': mem.available,
'used': mem.used,
'free': mem.free,
'percent': mem.percent
}
# 系统信息
boot_time = psutil.boot_time()
uptime_seconds = time.time() - boot_time
return {
'platform': {
'system': platform.system(),
'release': platform.release(),
'version': platform.version(),
'machine': platform.machine(),
'processor': platform.processor(),
'node': platform.node()
},
'cpu': cpu_info,
'memory': memory_info,
'boot_time': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(boot_time)),
'uptime': format_uptime(uptime_seconds)
}
async def collect_cpu_info() -> Dict[str, Any]:
"""收集CPU信息"""
cpu_percent = psutil.cpu_percent(interval=1, percpu=True)
cpu_freq = psutil.cpu_freq()
return {
'timestamp': time.time(),
'usage_total': sum(cpu_percent) / len(cpu_percent),
'usage_per_cpu': cpu_percent,
'frequency': {
'current': cpu_freq.current if cpu_freq else 0,
'max': cpu_freq.max if cpu_freq else 0,
'min': cpu_freq.min if cpu_freq else 0
},
'cores': {
'physical': psutil.cpu_count(logical=False),
'logical': psutil.cpu_count(logical=True)
}
}
async def collect_memory_info() -> Dict[str, Any]:
"""收集内存信息"""
mem = psutil.virtual_memory()
swap = psutil.swap_memory()
return {
'timestamp': time.time(),
'virtual': {
'total': mem.total,
'available': mem.available,
'used': mem.used,
'free': mem.free,
'percent': mem.percent
},
'swap': {
'total': swap.total,
'used': swap.used,
'free': swap.free,
'percent': swap.percent
}
}
async def collect_disk_info() -> Dict[str, Any]:
"""收集磁盘信息"""
partitions = psutil.disk_partitions()
disk_info = {
'timestamp': time.time(),
'partitions': [],
'io_counters': None
}
for partition in partitions:
try:
usage = psutil.disk_usage(partition.mountpoint)
disk_info['partitions'].append({
'device': partition.device,
'mountpoint': partition.mountpoint,
'fstype': partition.fstype,
'total': usage.total,
'used': usage.used,
'free': usage.free,
'percent': usage.percent
})
except PermissionError:
disk_info['partitions'].append({
'device': partition.device,
'mountpoint': partition.mountpoint,
'fstype': partition.fstype,
'error': 'Permission denied'
})
# 磁盘I/O统计
disk_io = psutil.disk_io_counters()
if disk_io:
disk_info['io_counters'] = {
'read_count': disk_io.read_count,
'write_count': disk_io.write_count,
'read_bytes': disk_io.read_bytes,
'write_bytes': disk_io.write_bytes,
'read_time': disk_io.read_time,
'write_time': disk_io.write_time
}
return disk_info
async def collect_network_info() -> Dict[str, Any]:
"""收集网络信息"""
net_io = psutil.net_io_counters()
net_if_addrs = psutil.net_if_addrs()
net_if_stats = psutil.net_if_stats()
network_info = {
'timestamp': time.time(),
'io_counters': {
'bytes_sent': net_io.bytes_sent,
'bytes_recv': net_io.bytes_recv,
'packets_sent': net_io.packets_sent,
'packets_recv': net_io.packets_recv,
'errin': net_io.errin,
'errout': net_io.errout,
'dropin': net_io.dropin,
'dropout': net_io.dropout
},
'interfaces': {}
}
for interface, addrs in net_if_addrs.items():
stats = net_if_stats.get(interface)
interface_info = {
'addresses': [],
'stats': {}
}
if stats:
interface_info['stats'] = {
'isup': stats.isup,
'duplex': stats.duplex,
'speed': stats.speed,
'mtu': stats.mtu
}
for addr in addrs:
addr_info = {
'family': addr.family,
'address': addr.address
}
if addr.netmask:
addr_info['netmask'] = addr.netmask
if addr.broadcast:
addr_info['broadcast'] = addr.broadcast
interface_info['addresses'].append(addr_info)
network_info['interfaces'][interface] = interface_info
return network_info
async def collect_processes_info() -> Dict[str, Any]:
"""收集进程信息"""
processes = []
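    # Note: on the first call psutil reports cpu_percent as 0.0 for every
    # process (a prior sample is needed to compute a delta), so the CPU-based
    # ordering below only becomes meaningful from the second collection on.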
for proc in psutil.process_iter(['pid', 'name', 'cpu_percent', 'memory_percent', 'status', 'username', 'create_time', 'cmdline']):
try:
pinfo = proc.info
processes.append({
'pid': pinfo['pid'],
'name': pinfo['name'],
'cpu_percent': pinfo['cpu_percent'],
'memory_percent': pinfo['memory_percent'],
'status': pinfo['status'],
'username': pinfo['username'] or 'N/A',
'create_time': pinfo['create_time'],
'cmdline': ' '.join(pinfo['cmdline']) if pinfo['cmdline'] else ''
})
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
# 按CPU使用率排序
processes.sort(key=lambda x: x['cpu_percent'], reverse=True)
return {
'timestamp': time.time(),
'total_processes': len(processes),
'processes': processes[:50] # 只返回前50个进程
}
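# Minimal local smoke test (assumes psutil is installed). Run as a module so
# the relative import of format_uptime resolves, e.g.
# `python -m system_monitor_mcp.collectors`; prints one memory snapshot.
if __name__ == "__main__":
    import asyncio
    import json
    print(json.dumps(asyncio.run(collect_memory_info()), indent=2))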
|
2301_80863610/system-monitor
|
system_monitor_mcp/collectors.py
|
Python
|
mit
| 6,829
|
#!/usr/bin/env python3
"""
System Monitor MCP Server - 主服务器模块
"""
import asyncio
import json
import logging
import sys
from typing import Dict, Any, List, Optional
from .collectors import (
collect_system_info,
collect_cpu_info,
collect_memory_info,
collect_disk_info,
collect_network_info,
collect_processes_info
)
from .utils import setup_logging
# 设置日志
logger = logging.getLogger("system_monitor_mcp")
class MCPServer:
"""MCP服务器实现"""
def __init__(self):
"""初始化MCP服务器"""
self.tools = {
"get_system_info": {
"description": "获取系统基本信息,包括CPU、内存、操作系统等",
"inputSchema": {
"type": "object",
"properties": {},
"required": []
}
},
"get_cpu_info": {
"description": "获取CPU详细信息,包括使用率、频率等",
"inputSchema": {
"type": "object",
"properties": {},
"required": []
}
},
"get_memory_info": {
"description": "获取内存详细信息,包括物理内存和交换内存",
"inputSchema": {
"type": "object",
"properties": {},
"required": []
}
},
"get_disk_info": {
"description": "获取磁盘详细信息,包括分区和I/O统计",
"inputSchema": {
"type": "object",
"properties": {},
"required": []
}
},
"get_network_info": {
"description": "获取网络详细信息,包括接口和流量统计",
"inputSchema": {
"type": "object",
"properties": {},
"required": []
}
},
"get_processes_info": {
"description": "获取进程详细信息,包括CPU使用率、内存使用率等",
"inputSchema": {
"type": "object",
"properties": {
"limit": {
"type": "integer",
"description": "返回的进程数量限制,默认为50",
"default": 50
},
"sort_by": {
"type": "string",
"description": "排序字段,可选值:cpu_percent, memory_percent, pid, name",
"enum": ["cpu_percent", "memory_percent", "pid", "name"],
"default": "cpu_percent"
},
"sort_desc": {
"type": "boolean",
"description": "是否降序排序",
"default": True
}
},
"required": []
}
},
"monitor_resource": {
"description": "监控系统资源使用情况,定期返回资源数据",
"inputSchema": {
"type": "object",
"properties": {
"resource_type": {
"type": "string",
"description": "要监控的资源类型",
"enum": ["cpu", "memory", "disk", "network", "all"],
"default": "all"
},
"interval": {
"type": "integer",
"description": "监控间隔(秒),默认为5秒",
"default": 5
},
"duration": {
"type": "integer",
"description": "监控持续时间(秒),默认为60秒",
"default": 60
}
},
"required": ["resource_type"]
}
}
}
self.resources = {}
async def get_system_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取系统基本信息"""
return await collect_system_info()
async def get_cpu_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取CPU详细信息"""
return await collect_cpu_info()
async def get_memory_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取内存详细信息"""
return await collect_memory_info()
async def get_disk_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取磁盘详细信息"""
return await collect_disk_info()
async def get_network_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取网络详细信息"""
return await collect_network_info()
async def get_processes_info(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""获取进程详细信息"""
limit = args.get("limit", 50)
sort_by = args.get("sort_by", "cpu_percent")
sort_desc = args.get("sort_desc", True)
processes_info = await collect_processes_info()
# 排序进程
processes = processes_info["processes"]
processes.sort(key=lambda x: x.get(sort_by, 0), reverse=sort_desc)
# 限制返回数量
processes_info["processes"] = processes[:limit]
return processes_info
async def monitor_resource(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""监控系统资源使用情况"""
resource_type = args.get("resource_type", "all")
interval = args.get("interval", 5)
duration = args.get("duration", 60)
# 计算需要监控的次数
count = max(1, duration // interval)
# 初始化结果
result = {
"resource_type": resource_type,
"interval": interval,
"duration": duration,
"data": []
}
# 根据资源类型选择监控函数
monitor_functions = {}
if resource_type == "all" or resource_type == "cpu":
monitor_functions["cpu"] = collect_cpu_info
if resource_type == "all" or resource_type == "memory":
monitor_functions["memory"] = collect_memory_info
if resource_type == "all" or resource_type == "disk":
monitor_functions["disk"] = collect_disk_info
if resource_type == "all" or resource_type == "network":
monitor_functions["network"] = collect_network_info
# 开始监控
for i in range(count):
data_point = {"timestamp": None}
# 收集各类资源数据
for res_type, func in monitor_functions.items():
res_data = await func()
data_point["timestamp"] = res_data["timestamp"]
data_point[res_type] = res_data
result["data"].append(data_point)
# 如果不是最后一次,则等待间隔时间
if i < count - 1:
await asyncio.sleep(interval)
return result
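    # Example arguments (illustrative): {"resource_type": "cpu", "interval": 2,
    # "duration": 10} yields duration // interval = 5 data points, one every
    # 2 seconds.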
async def handle_request(self, request: Dict[str, Any]) -> Dict[str, Any]:
"""处理JSON-RPC请求"""
try:
method = request.get("method")
params = request.get("params", {})
request_id = request.get("id")
if method == "initialize":
# 初始化响应
return {
"jsonrpc": "2.0",
"id": request_id,
"result": {
"protocolVersion": "2024-11-05",
"capabilities": {
"tools": self.tools,
"resources": self.resources
},
"serverInfo": {
"name": "system-monitor-mcp",
"version": "0.1.1"
}
}
}
elif method == "tools/call":
# 工具调用
tool_name = params.get("name")
tool_args = params.get("arguments", {})
if tool_name not in self.tools:
return {
"jsonrpc": "2.0",
"id": request_id,
"error": {
"code": -32601,
"message": f"Unknown tool: {tool_name}"
}
}
try:
# 调用对应的工具函数
if tool_name == "get_system_info":
result = await self.get_system_info(tool_args)
elif tool_name == "get_cpu_info":
result = await self.get_cpu_info(tool_args)
elif tool_name == "get_memory_info":
result = await self.get_memory_info(tool_args)
elif tool_name == "get_disk_info":
result = await self.get_disk_info(tool_args)
elif tool_name == "get_network_info":
result = await self.get_network_info(tool_args)
elif tool_name == "get_processes_info":
result = await self.get_processes_info(tool_args)
elif tool_name == "monitor_resource":
result = await self.monitor_resource(tool_args)
else:
raise ValueError(f"Unknown tool: {tool_name}")
return {
"jsonrpc": "2.0",
"id": request_id,
"result": {
"content": [
{
"type": "text",
"text": json.dumps(result, indent=2, ensure_ascii=False)
}
]
}
}
except Exception as e:
logger.exception(f"Error executing tool {tool_name}")
return {
"jsonrpc": "2.0",
"id": request_id,
"error": {
"code": -32603,
"message": str(e)
}
}
elif method == "tools/list":
# 列出可用工具
return {
"jsonrpc": "2.0",
"id": request_id,
"result": {
"tools": [
{
"name": name,
"description": info["description"],
"inputSchema": info["inputSchema"]
}
for name, info in self.tools.items()
]
}
}
elif method == "resources/list":
# 列出可用资源
return {
"jsonrpc": "2.0",
"id": request_id,
"result": {
"resources": self.resources
}
}
else:
return {
"jsonrpc": "2.0",
"id": request_id,
"error": {
"code": -32601,
"message": f"Unknown method: {method}"
}
}
except Exception as e:
logger.exception("Error handling request")
return {
"jsonrpc": "2.0",
"id": request.get("id"),
"error": {
"code": -32603,
"message": str(e)
}
}
async def run(self):
"""运行MCP服务器"""
logger.info("System Monitor MCP Server starting...")
# 使用标准输入/输出进行通信
logger.info("MCP Server ready to receive messages")
while True:
try:
# 读取一行JSON消息
                line = await asyncio.get_running_loop().run_in_executor(
                    None, sys.stdin.readline
                )
if not line:
logger.info("Connection closed")
break
# 解析JSON请求
try:
request = json.loads(line.strip())
logger.debug(f"Received request: {request}")
except json.JSONDecodeError as e:
logger.error(f"Invalid JSON: {e}")
continue
# 处理请求
response = await self.handle_request(request)
logger.debug(f"Sending response: {response}")
# 发送响应
response_line = json.dumps(response) + '\n'
sys.stdout.write(response_line)
sys.stdout.flush()
except Exception as e:
logger.exception("Error processing message")
# 发送错误响应
error_response = {
"jsonrpc": "2.0",
"id": None,
"error": {
"code": -32603,
"message": str(e)
}
}
error_line = json.dumps(error_response) + '\n'
sys.stdout.write(error_line)
sys.stdout.flush()
async def async_main():
"""异步主函数"""
# 设置日志
setup_logging(logging.INFO)
# 创建并运行MCP服务器
server = MCPServer()
await server.run()
def main():
"""同步主函数 - entry point"""
try:
asyncio.run(async_main())
except KeyboardInterrupt:
print("服务器已停止", file=sys.stderr)
sys.exit(0)
if __name__ == "__main__":
main()
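
# --- Usage sketch: a minimal stdio client for manually exercising the server.
# It assumes the server can be launched as "python server.py" (hypothetical
# invocation; adjust to your layout). The framing matches run() above: one
# JSON-RPC message per line on stdin/stdout.
import json
import subprocess

proc = subprocess.Popen(
    ["python", "server.py"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True,
)
request = {"jsonrpc": "2.0", "id": 1, "method": "tools/list", "params": {}}
proc.stdin.write(json.dumps(request) + "\n")
proc.stdin.flush()
print(proc.stdout.readline())  # one JSON-RPC response per line
proc.terminate()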
|
2301_80863610/system-monitor
|
system_monitor_mcp/server.py
|
Python
|
mit
| 14,939
|
#!/usr/bin/env python3
"""
System Monitor MCP Server - 工具函数模块
"""
import logging
import os
import sys
from typing import Dict, Any
def setup_logging(level=logging.INFO):
"""设置日志配置"""
# 创建日志目录
log_dir = os.path.join(os.path.expanduser("~"), ".system_monitor_mcp", "logs")
os.makedirs(log_dir, exist_ok=True)
# 设置日志格式
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
# 创建文件日志处理器
log_file = os.path.join(log_dir, "system_monitor_mcp.log")
handlers = [
logging.FileHandler(log_file, encoding='utf-8') # 只输出到文件
]
    # 配置日志(force=True 会先移除根记录器上已有的处理器,再安装上面的 handlers,
    # 因此无需再手动 clear / addHandler)
    logging.basicConfig(
        level=level,
        format=log_format,
        handlers=handlers,
        force=True
    )
# 设置第三方库的日志级别
logging.getLogger("asyncio").setLevel(logging.WARNING)
def format_bytes(bytes_value: int) -> str:
"""将字节数格式化为人类可读的形式"""
if bytes_value < 1024:
return f"{bytes_value} B"
elif bytes_value < 1024 ** 2:
return f"{bytes_value / 1024:.2f} KB"
elif bytes_value < 1024 ** 3:
return f"{bytes_value / (1024 ** 2):.2f} MB"
elif bytes_value < 1024 ** 4:
return f"{bytes_value / (1024 ** 3):.2f} GB"
else:
return f"{bytes_value / (1024 ** 4):.2f} TB"
def format_uptime(seconds: float) -> str:
"""将秒数格式化为人类可读的运行时间"""
days, remainder = divmod(int(seconds), 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
parts = []
if days > 0:
parts.append(f"{days}天")
if hours > 0 or days > 0:
parts.append(f"{hours}小时")
if minutes > 0 or hours > 0 or days > 0:
parts.append(f"{minutes}分钟")
parts.append(f"{seconds}秒")
return " ".join(parts)
|
2301_80863610/system-monitor
|
system_monitor_mcp/utils.py
|
Python
|
mit
| 2,149
|
import math
import random
from collections import defaultdict
def euclidean_distance(point1, point2):
if len(point1) != len(point2):
raise ValueError("Points must have the same dimensions")
squared_distance = 0
for i in range(len(point1)):
squared_distance += (point1[i] - point2[i]) ** 2
return math.sqrt(squared_distance)
def assign_cluster(x, centroids):
min_distance = float('inf')
closest_centroid = 0
for i, centroid in enumerate(centroids):
distance = euclidean_distance(x, centroid)
if distance < min_distance:
min_distance = distance
closest_centroid = i
return closest_centroid
def initialize_centroids(data, k):
# 从数据中随机选择k个点作为初始质心
return random.sample(data, k)
def update_centroids(clusters, data_dim):
new_centroids = []
for cluster_idx, points in clusters.items():
if not points:
# 如果某个簇没有点,随机重新初始化
new_centroid = [random.random() for _ in range(data_dim)]
new_centroids.append(new_centroid)
continue
# 计算该簇中所有点的均值作为新质心
centroid = [0] * data_dim
for point in points:
for i in range(data_dim):
centroid[i] += point[i]
for i in range(data_dim):
centroid[i] /= len(points)
new_centroids.append(centroid)
return new_centroids
def has_converged(old_centroids, new_centroids, epsilon):
for old_centroid, new_centroid in zip(old_centroids, new_centroids):
distance = euclidean_distance(old_centroid, new_centroid)
if distance >= epsilon:
return False
return True
def Kmeans(data, k, epsilon=1e-4, iteration=100):
if len(data) < k:
raise ValueError("Number of data points must be at least k")
if k <= 0:
raise ValueError("k must be positive")
# 1. 初始化质心
centroids = initialize_centroids(data, k)
data_dim = len(data[0])
# 用于存储历史信息
history = {
'centroids': [centroids.copy()],
'assignments': []
}
for iter_count in range(iteration):
# 2. 分配数据点到最近的质心
clusters = defaultdict(list)
assignments = []
for point in data:
cluster_idx = assign_cluster(point, centroids)
clusters[cluster_idx].append(point)
assignments.append(cluster_idx)
history['assignments'].append(assignments.copy())
# 3. 更新质心
new_centroids = update_centroids(clusters, data_dim)
history['centroids'].append(new_centroids.copy())
# 4. 检查收敛
if has_converged(centroids, new_centroids, epsilon):
print(f"算法在 {iter_count + 1} 次迭代后收敛")
break
centroids = new_centroids
else:
print(f"算法在 {iteration} 次迭代后未收敛")
return centroids, clusters, assignments, history
# 测试代码
def generate_sample_data(n_points=100, centers=3, dim=2):
data = []
true_centers = []
# 生成真实的中心点
for i in range(centers):
center = [random.uniform(0, 10) for _ in range(dim)]
true_centers.append(center)
# 围绕中心点生成数据
for _ in range(n_points // centers):
point = [coord + random.gauss(0, 1) for coord in center]
data.append(point)
return data, true_centers
def calculate_sse(data, assignments, centroids):
sse = 0
for point, assignment in zip(data, assignments):
centroid = centroids[assignment]
sse += euclidean_distance(point, centroid) ** 2
return sse
# 演示使用
if __name__ == "__main__":
# 生成测试数据
print("生成测试数据...")
data, true_centers = generate_sample_data(n_points=150, centers=3, dim=2)
print(f"数据点数量: {len(data)}")
print(f"数据维度: {len(data[0])}")
# 运行K均值聚类
print("\n运行K均值聚类...")
centroids, clusters, assignments, history = Kmeans(data, k=3, epsilon=0.001, iteration=100)
# 输出结果
print(f"\n聚类结果:")
print(f"找到的质心数量: {len(centroids)}")
for i, centroid in enumerate(centroids):
print(f"簇 {i}: 质心位置 {[round(c, 3) for c in centroid]}, 包含 {len(clusters[i])} 个点")
# 计算误差
sse = calculate_sse(data, assignments, centroids)
print(f"\n聚类误差平方和(SSE): {sse:.4f}")
# 显示每个点的分配情况(前10个)
print(f"\n前10个数据点的簇分配:")
for i in range(min(10, len(data))):
print(f"点 {i}: {data[i]} -> 簇 {assignments[i]}")
|
2301_80822435/machine-learning-course
|
assignment1/assignment3/2班44.py
|
Python
|
mit
| 4,931
|
import math
import operator
import random
from collections import Counter
def euclidean_distance(point1, point2):
if len(point1) != len(point2):
raise ValueError("Points must have the same dimensions")
squared_distance = 0
for i in range(len(point1)):
squared_distance += (point1[i] - point2[i]) ** 2
return math.sqrt(squared_distance)
def manhattan_distance(point1, point2):
if len(point1) != len(point2):
raise ValueError("Points must have the same dimensions")
distance = 0
for i in range(len(point1)):
distance += abs(point1[i] - point2[i])
return distance
def cosine_similarity(point1, point2):
if len(point1) != len(point2):
raise ValueError("Points must have the same dimensions")
dot_product = 0
norm1 = 0
norm2 = 0
for i in range(len(point1)):
dot_product += point1[i] * point2[i]
norm1 += point1[i] ** 2
norm2 += point2[i] ** 2
norm1 = math.sqrt(norm1)
norm2 = math.sqrt(norm2)
if norm1 == 0 or norm2 == 0:
return 0
return dot_product / (norm1 * norm2)
class KNN:
def __init__(self, k=3, distance_metric='euclidean', weights='uniform'):
self.k = k
self.distance_metric = distance_metric
self.weights = weights
self.X_train = None
self.y_train = None
def fit(self, X, y):
if len(X) != len(y):
raise ValueError("X and y must have the same length")
self.X_train = X
self.y_train = y
return self
def _calculate_distance(self, point1, point2):
if self.distance_metric == 'euclidean':
return euclidean_distance(point1, point2)
elif self.distance_metric == 'manhattan':
return manhattan_distance(point1, point2)
elif self.distance_metric == 'cosine':
# 余弦相似度转换为距离:1 - 相似度
return 1 - cosine_similarity(point1, point2)
else:
raise ValueError("Unsupported distance metric")
def predict_single(self, x):
if self.X_train is None or self.y_train is None:
raise ValueError("Model must be fitted before prediction")
# 计算与所有训练样本的距离
distances = []
for i, train_point in enumerate(self.X_train):
dist = self._calculate_distance(x, train_point)
distances.append((dist, self.y_train[i]))
# 按距离排序
distances.sort(key=operator.itemgetter(0))
# 获取前k个最近邻
k_neighbors = distances[:self.k]
if self.weights == 'uniform':
# 均匀权重:简单投票
neighbor_labels = [label for _, label in k_neighbors]
most_common = Counter(neighbor_labels).most_common(1)
return most_common[0][0]
elif self.weights == 'distance':
# 距离权重:距离越近权重越大
weight_dict = {}
for dist, label in k_neighbors:
# 避免除零错误
if dist == 0:
weight = float('inf')
else:
weight = 1 / dist
if label in weight_dict:
weight_dict[label] += weight
else:
weight_dict[label] = weight
# 返回权重最大的类别
return max(weight_dict.items(), key=operator.itemgetter(1))[0]
else:
raise ValueError("Unsupported weight type")
def predict(self, X):
return [self.predict_single(x) for x in X]
def predict_proba_single(self, x):
if self.X_train is None or self.y_train is None:
raise ValueError("Model must be fitted before prediction")
# 计算与所有训练样本的距离
distances = []
for i, train_point in enumerate(self.X_train):
dist = self._calculate_distance(x, train_point)
distances.append((dist, self.y_train[i]))
# 按距离排序
distances.sort(key=operator.itemgetter(0))
# 获取前k个最近邻
k_neighbors = distances[:self.k]
# 统计各类别的权重
class_weights = {}
total_weight = 0
for dist, label in k_neighbors:
if self.weights == 'uniform':
weight = 1
else: # distance weights
weight = 1 / dist if dist != 0 else float('inf')
if label in class_weights:
class_weights[label] += weight
else:
class_weights[label] = weight
total_weight += weight
# 计算概率
probabilities = {}
for label, weight in class_weights.items():
probabilities[label] = weight / total_weight
return probabilities
def predict_proba(self, X):
return [self.predict_proba_single(x) for x in X]
def score(self, X, y):
predictions = self.predict(X)
correct = 0
for pred, true in zip(predictions, y):
if pred == true:
correct += 1
return correct / len(y)
def normalize_data(X):
if not X:
return X
# 转置数据以便按特征处理
features = list(zip(*X))
normalized_features = []
for feature in features:
min_val = min(feature)
max_val = max(feature)
range_val = max_val - min_val
if range_val == 0:
# 如果所有值相同,设为0.5
normalized_feature = [0.5] * len(feature)
else:
normalized_feature = [(x - min_val) / range_val for x in feature]
normalized_features.append(normalized_feature)
# 转置回来
return list(zip(*normalized_features))
def train_test_split(X, y, test_size=0.2, random_state=None):
if len(X) != len(y):
raise ValueError("X and y must have the same length")
if random_state is not None:
random.seed(random_state)
# 创建索引并打乱
indices = list(range(len(X)))
random.shuffle(indices)
# 计算测试集大小
test_count = int(len(X) * test_size)
# 分割索引
test_indices = indices[:test_count]
train_indices = indices[test_count:]
# 分割数据
X_train = [X[i] for i in train_indices]
X_test = [X[i] for i in test_indices]
y_train = [y[i] for i in train_indices]
y_test = [y[i] for i in test_indices]
return X_train, X_test, y_train, y_test
# 测试代码
def generate_sample_data(n_samples=100, n_features=2, n_classes=3):
X = []
y = []
for class_idx in range(n_classes):
# 为每个类别生成一个中心点
center = [class_idx * 5 + random.uniform(-1, 1) for _ in range(n_features)]
for _ in range(n_samples // n_classes):
# 围绕中心点生成数据
point = [coord + random.gauss(0, 1) for coord in center]
X.append(point)
y.append(class_idx)
return X, y
# 演示使用
if __name__ == "__main__":
print("生成测试数据...")
X, y = generate_sample_data(n_samples=150, n_features=2, n_classes=3)
print(f"数据点数量: {len(X)}")
print(f"特征数量: {len(X[0])}")
print(f"类别数量: {len(set(y))}")
# 数据归一化
print("\n数据归一化...")
X_normalized = normalize_data(X)
# 分割训练测试集
print("分割训练测试集...")
X_train, X_test, y_train, y_test = train_test_split(
X_normalized, y, test_size=0.2, random_state=42
)
print(f"训练集大小: {len(X_train)}")
print(f"测试集大小: {len(X_test)}")
# 测试不同的k值
k_values = [1, 3, 5, 7]
for k in k_values:
print(f"\n=== k={k} ===")
# 创建并训练KNN模型
knn = KNN(k=k, distance_metric='euclidean', weights='uniform')
knn.fit(X_train, y_train)
# 预测并计算准确率
accuracy = knn.score(X_test, y_test)
print(f"准确率: {accuracy:.4f}")
# 显示前5个测试样本的预测结果
print("前5个测试样本的预测:")
for i in range(min(5, len(X_test))):
pred = knn.predict_single(X_test[i])
proba = knn.predict_proba_single(X_test[i])
print(f" 真实: {y_test[i]}, 预测: {pred}, 概率: {proba}")
# 测试不同距离度量
print(f"\n=== 不同距离度量比较 (k=3) ===")
metrics = ['euclidean', 'manhattan', 'cosine']
for metric in metrics:
knn = KNN(k=3, distance_metric=metric, weights='uniform')
knn.fit(X_train, y_train)
accuracy = knn.score(X_test, y_test)
print(f"{metric}: {accuracy:.4f}")
|
2301_80822435/machine-learning-course
|
assignment1/assignment4/2班44.py
|
Python
|
mit
| 10,796
|
import math
import random
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
# 辅助函数:计算两个向量的欧氏距离
def euclidean_distance(a, b):
return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
# 辅助函数:将样本分配到最近的聚类中心
def assign_cluster(x, c):
min_distance = float('inf')
cluster_index = 0
for i, centroid in enumerate(c):
distance = euclidean_distance(x, centroid)
if distance < min_distance:
min_distance = distance
cluster_index = i
return cluster_index
# 初始化聚类中心 - 随机选择k个样本
c = random.sample(data, k)
# 初始化labels变量
labels = []
# 迭代优化
for _ in range(max_iterations):
# 分配样本到最近的聚类中心
clusters = [[] for _ in range(k)]
labels = [] # 重新初始化labels
for sample in data:
cluster_idx = assign_cluster(sample, c)
clusters[cluster_idx].append(sample)
labels.append(cluster_idx)
# 重新计算聚类中心
new_c = []
total_movement = 0.0
for i in range(k):
if clusters[i]: # 如果簇不为空
# 计算簇内均值作为新中心
dimension = len(clusters[i][0])
new_center = [0.0] * dimension
for point in clusters[i]:
for d in range(dimension):
new_center[d] += point[d]
for d in range(dimension):
new_center[d] /= len(clusters[i])
new_c.append(new_center)
# 计算中心移动距离
movement = euclidean_distance(c[i], new_center)
total_movement += movement
else:
# 如果簇为空,随机重新初始化该中心
new_c.append(random.choice(data))
# 检查收敛条件
if total_movement < epsilon:
break
c = new_c
return c, labels
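
# Usage sketch with small hypothetical 2D data, showing the (centers, labels)
# return contract of Kmeans above.
if __name__ == "__main__":
    random.seed(0)  # fix the random initial centers for a reproducible demo
    points = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [8.2, 7.9]]
    centers, labels = Kmeans(points, k=2)
    print("centers:", centers)
    print("labels:", labels)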
|
2301_80822435/machine-learning-course
|
assignment3/1班01.py
|
Python
|
mit
| 2,119
|
import random
import math
import matplotlib.pyplot as plt
def assign_cluster(x, centroids):
"""
将样本 x 分配到最近的簇中心
"""
min_dist = float('inf')
idx = 0
for i, c in enumerate(centroids):
dist = math.dist(x, c)
if dist < min_dist:
min_dist = dist
idx = i
return idx
def Kmeans(data, k, epsilon=1e-4, iteration=100):
"""
手动实现 K-means 聚类(不依赖任何工具包)
"""
# 初始化 k 个随机中心
centroids = random.sample(data, k)
for _ in range(iteration):
# 1.分配到最近簇
clusters = [[] for _ in range(k)]
for x in data:
idx = assign_cluster(x, centroids)
clusters[idx].append(x)
# 2.计算新中心
new_centroids = []
for cluster in clusters:
if len(cluster) == 0:
new_centroids.append(random.choice(data))
else:
x_mean = sum(p[0] for p in cluster) / len(cluster)
y_mean = sum(p[1] for p in cluster) / len(cluster)
new_centroids.append((x_mean, y_mean))
# 3.检查收敛
shift = sum(math.dist(centroids[i], new_centroids[i]) for i in range(k))
centroids = new_centroids
if shift < epsilon:
break
return clusters, centroids
random.seed(0)
# 每类 50 个点
data = []
centers = [(2, 2), (7, 3), (4, 8)]
radius = 2
for cx, cy in centers:
for _ in range(50): # 每类 50 个点
x = random.uniform(cx - radius, cx + radius)
y = random.uniform(cy - radius, cy + radius)
data.append((x, y))
clusters, centroids = Kmeans(data, k=3)
print("最终中心点:")
for i, c in enumerate(centroids):
print(f"Cluster {i}: ({c[0]:.3f}, {c[1]:.3f})")
plt.figure(figsize=(7,7))
# 画每一类
for cluster in clusters:
xs = [p[0] for p in cluster]
ys = [p[1] for p in cluster]
plt.scatter(xs, ys, s=30)
# 画中心
cx = [c[0] for c in centroids]
cy = [c[1] for c in centroids]
plt.scatter(cx, cy, marker='X', s=200, linewidths=2)
plt.title("K-means Clustering Result")
plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/1班02.py
|
Python
|
mit
| 2,172
|
import math
import random
def assign_cluster(x, c):
"""
将样本x分配到最近的质心c
参数:
x: 一个数据点 (列表或元组)
c: 质心列表 [c1, c2, ..., ck],每个质心是一个与x维度相同的点
返回:
cluster_index: 最近质心的索引
min_distance: 到最近质心的距离
"""
min_distance = float('inf')
cluster_index = -1
for i, centroid in enumerate(c):
# 计算欧几里得距离
distance = 0.0
for j in range(len(x)):
distance += (x[j] - centroid[j]) ** 2
distance = math.sqrt(distance)
if distance < min_distance:
min_distance = distance
cluster_index = i
return cluster_index, min_distance
def Kmeans(data, k, epsilon=1e-4, iteration=100):
"""
K-means聚类算法实现
参数:
data: 数据集,列表的列表,每个内层列表代表一个数据点
k: 聚类数量
epsilon: 收敛阈值,当质心变化小于此值时停止迭代
iteration: 最大迭代次数
返回:
centroids: 最终质心列表
clusters: 每个数据点所属的簇索引
distances: 每个数据点到其质心的距离
"""
# 1. 初始化:随机选择k个点作为初始质心
n = len(data)
if n < k:
raise ValueError("数据点数量不能小于聚类数量k")
# 随机选择k个不重复的索引
indices = random.sample(range(n), k)
centroids = [data[i][:] for i in indices] # 深拷贝初始质心
print(f"初始质心: {centroids}")
# 存储每个点所属的簇和距离
clusters = [-1] * n
distances = [0.0] * n
iter_count = 0
for iter_count in range(iteration):
print(f"\n=== 第 {iter_count + 1} 次迭代 ===")
# 2. 分配步骤:将每个点分配到最近的质心
cluster_assignments = [[] for _ in range(k)]
for i, point in enumerate(data):
cluster_idx, dist = assign_cluster(point, centroids)
clusters[i] = cluster_idx
distances[i] = dist
cluster_assignments[cluster_idx].append(point)
# 3. 更新步骤:重新计算质心
new_centroids = []
for i in range(k):
if len(cluster_assignments[i]) == 0:
# 如果簇为空,保持原质心
new_centroids.append(centroids[i][:])
print(f"簇 {i} 为空,保持原质心")
continue
# 计算新质心(均值)
dimension = len(data[0])
new_centroid = [0.0] * dimension
for point in cluster_assignments[i]:
for j in range(dimension):
new_centroid[j] += point[j]
for j in range(dimension):
new_centroid[j] /= len(cluster_assignments[i])
new_centroids.append(new_centroid)
print(f"簇 {i} 有 {len(cluster_assignments[i])} 个点,新质心: {[round(x, 4) for x in new_centroid]}")
# 4. 检查收敛条件:质心变化是否小于epsilon
max_shift = 0.0
for i in range(k):
shift = 0.0
for j in range(len(centroids[i])):
shift += (centroids[i][j] - new_centroids[i][j]) ** 2
shift = math.sqrt(shift)
max_shift = max(max_shift, shift)
print(f"最大质心移动: {max_shift:.6f}")
if max_shift < epsilon:
print(f"算法在 {iter_count + 1} 次迭代后收敛")
break
centroids = new_centroids
else:
print(f"达到最大迭代次数 {iteration},停止迭代")
return centroids, clusters, distances
def calculate_wcss(data, centroids, clusters):
"""
计算簇内平方和(Within-Cluster Sum of Squares)
"""
wcss = 0.0
for i, point in enumerate(data):
centroid = centroids[clusters[i]]
distance = 0.0
for j in range(len(point)):
distance += (point[j] - centroid[j]) ** 2
wcss += distance
return wcss
# 测试代码
if __name__ == "__main__":
# 创建测试数据(二维点集,明显分为3个簇)
test_data = [
[1.0, 1.0], [1.1, 1.2], [1.2, 0.9], [0.9, 1.1], # 簇0
[4.0, 4.0], [4.1, 3.9], [3.9, 4.1], [4.2, 3.8], # 簇1
[7.0, 1.0], [7.1, 1.2], [6.9, 0.8], [7.2, 1.1] # 簇2
]
print("测试数据:")
for i, point in enumerate(test_data):
print(f"点 {i}: {point}")
# 运行K-means算法
k = 3
epsilon = 0.001
max_iterations = 100
print(f"\n开始K-means聚类,k={k}, epsilon={epsilon}")
centroids, clusters, distances = Kmeans(test_data, k, epsilon, max_iterations)
# 输出结果
print(f"\n=== 最终结果 ===")
print(f"质心:")
for i, centroid in enumerate(centroids):
print(f" 簇 {i}: {[round(x, 4) for x in centroid]}")
print(f"\n数据点分配:")
cluster_points = [[] for _ in range(k)]
for i, point in enumerate(test_data):
cluster_points[clusters[i]].append(point)
print(f"点 {point} -> 簇 {clusters[i]}, 距离: {distances[i]:.4f}")
print(f"\n各簇统计:")
for i in range(k):
print(f"簇 {i}: {len(cluster_points[i])} 个点")
# 计算评估指标
wcss = calculate_wcss(test_data, centroids, clusters)
print(f"\n簇内平方和 (WCSS): {wcss:.4f}")
# 可视化结果(简单的文本可视化)
print(f"\n=== 聚类结果可视化 ===")
for i in range(k):
print(f"簇 {i} 的点:")
for point in cluster_points[i]:
print(f" {point}")
|
2301_80822435/machine-learning-course
|
assignment3/1班03.py
|
Python
|
mit
| 6,091
|
import random
import math
def euclidean_distance(point1, point2):
if len(point1) != len(point2):
raise ValueError("点的维度必须相同")
squared_distance = 0
for i in range(len(point1)):
squared_distance += (point1[i] - point2[i]) ** 2
return math.sqrt(squared_distance)
def assign_cluster(x, centroids):
min_distance = float('inf')
closest_centroid = 0
for i, centroid in enumerate(centroids):
distance = euclidean_distance(x, centroid)
if distance < min_distance:
min_distance = distance
closest_centroid = i
return closest_centroid
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
centroids = random.sample(data, k)
clusters = [0] * len(data)
iteration = 0
centroid_movement = float('inf')
while iteration < max_iterations and centroid_movement > epsilon:
for i, point in enumerate(data):
clusters[i] = assign_cluster(point, centroids)
new_centroids = []
for cluster_idx in range(k):
cluster_points = [data[i] for i in range(len(data)) if clusters[i] == cluster_idx]
if len(cluster_points) == 0:
new_centroids.append(random.choice(data))
else:
dimension = len(cluster_points[0])
new_centroid = [0] * dimension
for point in cluster_points:
for d in range(dimension):
new_centroid[d] += point[d]
for d in range(dimension):
new_centroid[d] /= len(cluster_points)
new_centroids.append(new_centroid)
centroid_movement = 0
for i in range(k):
centroid_movement += euclidean_distance(centroids[i], new_centroids[i])
centroid_movement /= k
centroids = new_centroids
iteration += 1
print(f"迭代 {iteration}: 质心平均移动距离 = {centroid_movement:.6f}")
print(f"聚类完成,共迭代 {iteration} 次")
return centroids, clusters
def calculate_sse(data, clusters, centroids):
sse = 0
for i, point in enumerate(data):
cluster_idx = clusters[i]
sse += euclidean_distance(point, centroids[cluster_idx]) ** 2
return sse
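
# Usage sketch (hypothetical data): cluster, then score the partition with the
# calculate_sse helper defined above.
if __name__ == "__main__":
    random.seed(0)
    pts = [[1, 1], [1, 2], [9, 9], [9, 8], [5, 1]]
    centroids, clusters = Kmeans(pts, k=2)
    print("SSE:", round(calculate_sse(pts, clusters, centroids), 4))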
|
2301_80822435/machine-learning-course
|
assignment3/1班04.py
|
Python
|
mit
| 2,271
|
import math
import random
def assign_cluster(x, c):
min_dist = float('inf')
cluster_idx = 0
for i, center in enumerate(c):
# 计算欧氏距离
dist = math.sqrt(sum([(a - b)**2 for a, b in zip(x, center)]))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon=1e-6, iteration=100):
n_samples = len(data)
n_features = len(data[0]) if n_samples > 0 else 0
# 初始化聚类中心(随机选择k个样本)
centers = random.sample(data, k)
for _ in range(iteration):
# 分配每个样本到最近的聚类中心
clusters = [[] for _ in range(k)] # 存储每个聚类的样本
for x in data:
idx = assign_cluster(x, centers)
clusters[idx].append(x)
# 计算新的聚类中心
new_centers = []
for cluster in clusters:
if not cluster: # 防止空聚类(可根据需求调整策略)
new_centers.append(random.choice(data)) # 随机选一个样本作为中心
continue
# 计算每个特征的均值
center = [sum(dim) / len(cluster) for dim in zip(*cluster)]
new_centers.append(center)
# 判断是否收敛(所有中心变化小于epsilon)
max_change = 0.0
for old, new in zip(centers, new_centers):
change = math.sqrt(sum([(a - b)** 2 for a, b in zip(old, new)]))
if change > max_change:
max_change = change
if max_change < epsilon:
break
centers = new_centers
# 生成最终标签
labels = [assign_cluster(x, centers) for x in data]
return centers, labels
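
# Usage sketch: an elbow-style sweep over k on hypothetical data, computing the
# within-cluster SSE inline since this file defines no scoring helper.
if __name__ == "__main__":
    random.seed(1)
    pts = [[random.gauss(cx, 0.3), random.gauss(cy, 0.3)]
           for cx, cy in [(0, 0), (5, 5), (0, 5)] for _ in range(20)]
    for k in (1, 2, 3, 4):
        centers, labels = Kmeans(pts, k)
        sse = sum(sum((a - b) ** 2 for a, b in zip(p, centers[lab]))
                  for p, lab in zip(pts, labels))
        print(f"k={k}  SSE={sse:.2f}")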
|
2301_80822435/machine-learning-course
|
assignment3/1班05.py
|
Python
|
mit
| 1,793
|
import random
def assign_cluster(x, centers):
min_dist_sq = float('inf')
best_cluster_idx = 0
# 计算样本到每个聚类中心的欧氏距离平方
for idx, center in enumerate(centers):
dist_sq = sum((xi - ci) ** 2 for xi, ci in zip(x, center))
if dist_sq < min_dist_sq:
min_dist_sq = dist_sq
best_cluster_idx = idx
return best_cluster_idx
def Kmeans(data, k, epsilon=1e-3, iteration=100):
# 1. 输入参数校验
if not data:
raise ValueError("输入数据不能为空")
n_samples = len(data)
if k < 1 or k > n_samples:
raise ValueError(f"k值必须满足 1 ≤ k ≤ 样本数(当前样本数:{n_samples})")
n_features = len(data[0])
for sample in data:
if len(sample) != n_features:
raise ValueError("所有样本必须具有相同的维度")
# 2. 初始化聚类中心
init_indices = random.sample(range(n_samples), k)
final_centers = [data[i].copy() for i in init_indices]
# 3. 迭代优化聚类中心
for iter_cnt in range(iteration):
clusters = [[] for _ in range(k)]
labels = []
for sample_idx, sample in enumerate(data):
cluster_idx = assign_cluster(sample, final_centers)
clusters[cluster_idx].append(sample_idx)
labels.append(cluster_idx)
old_centers = [center.copy() for center in final_centers]
for cluster_idx in range(k):
cluster_samples = clusters[cluster_idx]
if not cluster_samples:
final_centers[cluster_idx] = data[random.randint(0, n_samples - 1)].copy()
continue
new_center = []
for dim in range(n_features):
dim_sum = sum(data[s_idx][dim] for s_idx in cluster_samples)
new_center.append(dim_sum / len(cluster_samples))
final_centers[cluster_idx] = new_center
total_center_dist = 0.0
for old_c, new_c in zip(old_centers, final_centers):
dist = sum((o - n) ** 2 for o, n in zip(old_c, new_c)) ** 0.5
total_center_dist += dist
if total_center_dist < epsilon:
print(f"迭代 {iter_cnt + 1} 次后收敛(中心总变化量:{total_center_dist:.6f} < ε={epsilon})")
break
else:
print(f"已达到最大迭代次数 {iteration},未完全收敛(最终中心总变化量:{total_center_dist:.6f})")
return labels, final_centers
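
# Usage sketch: seed the RNG so the random initial centers (and the convergence
# message printed above) are reproducible across runs; data is hypothetical.
if __name__ == "__main__":
    random.seed(42)
    demo = [[0.0, 0.0], [0.2, 0.1], [4.0, 4.0], [4.1, 3.9]]
    labels, centers = Kmeans(demo, k=2)
    print("labels:", labels)
    print("centers:", centers)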
|
2301_80822435/machine-learning-course
|
assignment3/1班13.py
|
Python
|
mit
| 2,511
|
import math
import random
def assign_cluster(x, centers):
"""
将样本分配到最近的聚类中心
x: 单个样本(列表或元组)
centers: 聚类中心列表(每个元素为样本格式)
return: 最近聚类中心的索引
"""
min_dist = float('inf')
cluster_idx = 0
for i, c in enumerate(centers):
# 计算欧氏距离
dist = math.sqrt(sum([(a - b) **2 for a, b in zip(x, c)]))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon=1e-6, iteration=100):
"""
K均值聚类算法实现
data: 样本列表(每个样本为可迭代对象)
k: 聚类数量
epsilon: 中心变化阈值(小于此值认为收敛)
iteration: 最大迭代次数
return: (聚类结果, 最终聚类中心)
"""
# 检查输入有效性
if k <= 0 or k > len(data):
raise ValueError("k值必须为正整数且不大于样本数量")
if len(data) == 0:
raise ValueError("数据不能为空")
# 初始化聚类中心(随机选择k个不同样本)
centers = random.sample(data, k)
n_features = len(data[0])
for _ in range(iteration):
# 分配样本到聚类
clusters = [[] for _ in range(k)]
for x in data:
idx = assign_cluster(x, centers)
clusters[idx].append(x)
# 计算新的聚类中心
new_centers = []
for cluster in clusters:
if not cluster: # 避免空聚类(随机重置中心)
new_center = random.choice(data)
else:
# 计算每个特征的均值
new_center = [sum(dim) / len(cluster) for dim in zip(*cluster)]
new_centers.append(new_center)
# 检查收敛(所有中心变化小于epsilon)
center_changes = [
math.sqrt(sum([(a - b)** 2 for a, b in zip(c, nc)]))
for c, nc in zip(centers, new_centers)
]
if max(center_changes) < epsilon:
break
centers = new_centers
# 生成最终聚类结果(每个样本对应的聚类索引)
result = [assign_cluster(x, centers) for x in data]
return result, centers
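
# Usage sketch (hypothetical data): group samples by their returned label with
# a defaultdict; Kmeans above returns per-sample labels plus the final centers.
if __name__ == "__main__":
    from collections import defaultdict
    random.seed(7)
    demo = [(1, 2), (1, 1), (6, 6), (6, 5), (1, 6)]
    labels, centers = Kmeans(demo, k=2)
    groups = defaultdict(list)
    for point, label in zip(demo, labels):
        groups[label].append(point)
    for label, members in sorted(groups.items()):
        print(f"cluster {label}: {members}")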
|
2301_80822435/machine-learning-course
|
assignment3/1班18.py
|
Python
|
mit
| 2,311
|
import math
import random
def assign_cluster(x, centers):
"""
将样本x分配到最近的聚类中心
参数:
x: 单个样本(列表/元组,如[1,2,3])
centers: 聚类中心列表(每个元素为样本格式)
返回:
最近聚类中心的索引(整数)
"""
min_distance = float('inf') # 初始化最小距离为无穷大
best_cluster = 0 # 初始化最佳聚类索引
for i, center in enumerate(centers):
# 计算欧氏距离:sqrt(sum((x_i - c_i)^2))
distance = math.sqrt(sum((a - b)** 2 for a, b in zip(x, center)))
# 更新最小距离和最佳聚类
if distance < min_distance:
min_distance = distance
best_cluster = i
return best_cluster
def Kmeans(data, k, epsilon=1e-5, iteration=100):
"""
K均值聚类主函数
参数:
data: 样本集(列表,每个元素为样本)
k: 聚类数量(正整数)
epsilon: 收敛阈值(中心变化小于该值则停止,默认1e-5)
iteration: 最大迭代次数(默认100)
返回:
centers: 最终聚类中心列表
labels: 每个样本的聚类标签列表(与data顺序对应)
"""
# 输入合法性校验
if not data:
raise ValueError("输入数据不能为空")
if k <= 0 or k > len(data):
raise ValueError(f"k值必须为(0, {len(data)}]之间的整数")
if iteration <= 0:
raise ValueError("迭代次数必须为正整数")
# 初始化聚类中心:从数据中随机选择k个样本
centers = random.sample(data, k)
n_features = len(data[0]) # 特征维度
for _ in range(iteration):
# 1. 分配阶段:将所有样本分配到最近的聚类中心
clusters = [[] for _ in range(k)] # 存储每个聚类的样本
for sample in data:
cluster_idx = assign_cluster(sample, centers)
clusters[cluster_idx].append(sample)
# 2. 更新阶段:计算新的聚类中心(均值)
new_centers = []
for cluster in clusters:
if not cluster: # 处理空聚类(重新随机选一个样本)
new_center = random.choice(data)
else:
# 按特征维度计算均值(zip(*cluster)将样本按特征维度分组)
new_center = [sum(dim) / len(cluster) for dim in zip(*cluster)]
new_centers.append(new_center)
# 3. 收敛检查:计算新旧中心的最大距离
max_change = 0.0
for old, new in zip(centers, new_centers):
change = math.sqrt(sum((a - b)** 2 for a, b in zip(old, new)))
if change > max_change:
max_change = change
# 如果中心变化小于阈值,提前停止迭代
if max_change < epsilon:
centers = new_centers
break
centers = new_centers # 更新中心继续迭代
# 生成最终的样本标签
labels = [assign_cluster(sample, centers) for sample in data]
return centers, labels
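
# Usage sketch (hypothetical data): count the size of each cluster with
# collections.Counter from the labels returned above.
if __name__ == "__main__":
    from collections import Counter
    random.seed(3)
    demo = [[0, 0], [0, 1], [1, 0], [9, 9], [9, 8], [8, 9]]
    centers, labels = Kmeans(demo, k=2)
    print("cluster sizes:", Counter(labels))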
|
2301_80822435/machine-learning-course
|
assignment3/1班22.py
|
Python
|
mit
| 3,179
|
import math
import random
def euclidean_distance(point1, point2):
#计算欧几里得距离
return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))
def assign_cluster(X, centers):
    # X: list of lists,数据点列表
    # centers: list of lists,聚类中心列表
    # 返回 labels: 每个样本所属的簇索引
labels = []
for point in X:
min_dist = float('inf')
best_center = 0
for j, center in enumerate(centers):
dist = euclidean_distance(point, center)
if dist < min_dist:
min_dist = dist
best_center = j
labels.append(best_center)
return labels
def kmeans(X, k, epsilon=1e-4, max_iterations=100):
    # X: 数据集  k: 聚类数  epsilon: 收敛阈值  max_iterations: 最大迭代次数
    # 返回 centers: 聚类中心  labels: 聚类标签
# 随机初始化中心
centers = random.sample(X, k)
for iteration in range(max_iterations):
# 分配簇
labels = assign_cluster(X, centers)
# 更新中心
new_centers = []
for j in range(k):
# 获取属于该簇的所有点
cluster_points = [X[i] for i in range(len(X)) if labels[i] == j]
if len(cluster_points) > 0:
# 计算新中心(均值)
n_features = len(X[0])
new_center = [
sum(point[d] for point in cluster_points) / len(cluster_points)
for d in range(n_features)
]
new_centers.append(new_center)
else:
# 空簇处理:重新随机初始化
new_centers.append(random.choice(X))
# 检查收敛
center_shift = sum(
euclidean_distance(centers[j], new_centers[j])
for j in range(k)
)
if center_shift < epsilon:
break
centers = new_centers
return centers, labels
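
# Usage sketch (hypothetical data): after fitting, assign_cluster above can
# also label unseen points against the fitted centers.
if __name__ == "__main__":
    random.seed(5)
    X = [[1.0, 1.0], [1.1, 0.9], [7.0, 7.0], [7.1, 6.9]]
    centers, labels = kmeans(X, k=2)
    new_point = [6.8, 7.2]
    print("new point ->", assign_cluster([new_point], centers)[0])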
|
2301_80822435/machine-learning-course
|
assignment3/1班23.py
|
Python
|
mit
| 2,012
|
import random
import math
def assign_cluster(x, c):
min_dist = float('inf')
cluster_idx = 0
for i, center in enumerate(c):
# 计算欧氏距离
dist = math.sqrt(sum([(a - b) ** 2 for a, b in zip(x, center)]))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon, iteration):
# 输入检查
if not data:
raise ValueError("数据不能为空")
if k <= 0 or k > len(data):
raise ValueError(f"k值必须在1到{len(data)}之间")
if iteration <= 0:
raise ValueError("迭代次数必须为正整数")
# 初始化聚类中心:随机选择k个不重复样本
n_samples = len(data)
centers = random.sample(data, k)
prev_centers = None
labels = [0] * n_samples # 记录每个样本的聚类标签
    for iter_num in range(iteration):
# 分配样本到最近的聚类中心
for i, sample in enumerate(data):
labels[i] = assign_cluster(sample, centers)
# 更新聚类中心
new_centers = []
for i in range(k):
# 找到当前聚类的所有样本
cluster_samples = [data[j] for j in range(n_samples) if labels[j] == i]
if not cluster_samples: # 避免空聚类(重新随机选择一个中心)
new_center = random.choice(data)
else:
# 计算每个特征的均值作为新中心
n_features = len(data[0])
new_center = [sum(sample[f] for sample in cluster_samples) / len(cluster_samples) for f in range(n_features)]
new_centers.append(new_center)
# 检查是否收敛
if prev_centers is not None:
# 计算所有中心的总变化量
total_change = sum(math.sqrt(sum((a - b) ** 2 for a, b in zip(prev, new))) for prev, new in zip(prev_centers, new_centers))
if total_change < epsilon:
print(f"迭代 {_ + 1} 次后收敛")
break
prev_centers = new_centers
centers = new_centers
else:
print(f"达到最大迭代次数 {iteration},未完全收敛")
return labels, centers
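
# Usage sketch (hypothetical data): this Kmeans declares epsilon and iteration
# without defaults, so all four arguments must be supplied explicitly.
if __name__ == "__main__":
    random.seed(11)
    demo = [[2, 3], [2, 2], [8, 8], [8, 9], [5, 5]]
    labels, centers = Kmeans(demo, k=2, epsilon=1e-4, iteration=50)
    print("labels:", labels)
    print("centers:", centers)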
|
2301_80822435/machine-learning-course
|
assignment3/1班28.py
|
Python
|
mit
| 2,233
|
import numpy as np
import matplotlib.pyplot as plt
# 聚类的类
class JuLei:
def __init__(self, ge_shu=2, zui_da_ci_shu=100, cha_zhi=0.0001):
self.ge_shu = ge_shu # 要聚成几类
self.zui_da_ci_shu = zui_da_ci_shu # 最多迭代多少次
self.cha_zhi = cha_zhi # 中心点变化小于这个就停
self.zhong_xin = None # 存放中心点
self.biao_qian = None # 每个点的类别标签
def xun_lian(self, shu_ju):
# 训练模型,shu_ju是输入的数据
ge_shu, te_zheng = shu_ju.shape # 多少个样本,多少个特征
# 随便选几个点当初始中心点
suiji_bianhao = np.random.choice(ge_shu, self.ge_shu, replace=False)
self.zhong_xin = shu_ju[suiji_bianhao]
for _ in range(self.zui_da_ci_shu):
# 给每个点分标签
self.biao_qian = self.fen_lei(shu_ju)
# 重新算中心点
xin_zhong_xin = self.suan_zhong_xin(shu_ju)
# 看看中心点变了多少,差不多就停
bianhua = 0
for i in range(self.ge_shu):
bianhua = bianhua + np.linalg.norm(xin_zhong_xin[i] - self.zhong_xin[i])
if bianhua < self.cha_zhi:
break
self.zhong_xin = xin_zhong_xin
def fen_lei(self, shu_ju):
# 给每个数据点分配类别
biao_qian_list = []
for dian in shu_ju:
# 算每个点到所有中心点的距离
ju_li_list = []
for zx in self.zhong_xin:
ju_li = np.sqrt(np.sum((dian - zx) **2)) # 欧氏距离
ju_li_list.append(ju_li)
# 找最近的那个中心点的序号
zui_jin = np.argmin(ju_li_list)
biao_qian_list.append(zui_jin)
return np.array(biao_qian_list)
def suan_zhong_xin(self, shu_ju):
# 计算新的中心点
xin_zhong_xin_list = []
for i in range(self.ge_shu):
# 找出属于第i类的所有点
lei_nei_dian = []
for j in range(len(shu_ju)):
if self.biao_qian[j] == i:
lei_nei_dian.append(shu_ju[j])
lei_nei_dian = np.array(lei_nei_dian)
# 算平均值当新中心点
xin_zx = np.mean(lei_nei_dian, axis=0)
xin_zhong_xin_list.append(xin_zx)
return np.array(xin_zhong_xin_list)
# 测试一下
if __name__ == "__main__":
# 自己造点数据
def zao_shu_ju():
np.random.seed(10)
shu_ju1 = np.random.normal(0, 1, (100, 2)) # 第一类
shu_ju2 = np.random.normal(5, 1, (100, 2)) # 第二类
shu_ju3 = np.random.normal(10, 1, (100, 2)) # 第三类
return np.vstack((shu_ju1, shu_ju2, shu_ju3))
# 生成数据
my_shu_ju = zao_shu_ju()
# 聚类
my_julei = JuLei(ge_shu=3) # 聚成3类
my_julei.xun_lian(my_shu_ju)
biao_qian = my_julei.biao_qian
zhong_xin = my_julei.zhong_xin
# 画图
plt.scatter(my_shu_ju[:, 0], my_shu_ju[:, 1], c=biao_qian, s=30)
plt.scatter(zhong_xin[:, 0], zhong_xin[:, 1], c='red', marker='x', s=200)
    plt.title('聚类结果')
    plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/1班29.py
|
Python
|
mit
| 3,204
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
class KMeans:
def __init__(self, k=3, max_iters=100, tol=1e-4, random_state=None):
"""
KMeans聚类算法
参数:
k: 聚类数量
max_iters: 最大迭代次数
tol: 收敛阈值(质心变化小于该值时停止迭代)
random_state: 随机种子
"""
self.k = k
self.max_iters = max_iters
self.tol = tol
self.random_state = random_state
self.centroids = None
self.labels = None
self.inertia_ = None
def _initialize_centroids(self, X):
"""初始化质心 - 使用KMeans++方法"""
np.random.seed(self.random_state)
n_samples, n_features = X.shape
# 第一个质心随机选择
centroids = [X[np.random.randint(n_samples)]]
# 选择剩余的k-1个质心
for _ in range(1, self.k):
# 计算每个样本到最近质心的距离
distances = np.array([min([np.linalg.norm(x - c)**2 for c in centroids])
for x in X])
# 根据距离的概率分布选择下一个质心
probabilities = distances / distances.sum()
cumulative_probs = probabilities.cumsum()
r = np.random.rand()
for i, p in enumerate(cumulative_probs):
if r < p:
centroids.append(X[i])
break
return np.array(centroids)
def _assign_clusters(self, X, centroids):
"""将样本分配到最近的质心"""
distances = np.linalg.norm(X[:, np.newaxis] - centroids, axis=2)
return np.argmin(distances, axis=1)
def _update_centroids(self, X, labels):
"""更新质心为每个簇的均值"""
new_centroids = np.array([X[labels == i].mean(axis=0)
for i in range(self.k)])
return new_centroids
def _calculate_inertia(self, X, labels, centroids):
"""计算簇内平方和(inertia)"""
inertia = 0
for i in range(self.k):
cluster_points = X[labels == i]
if len(cluster_points) > 0:
inertia += np.sum((cluster_points - centroids[i])**2)
return inertia
def fit(self, X):
"""
训练KMeans模型
参数:
X: 输入数据,形状为(n_samples, n_features)
"""
n_samples, n_features = X.shape
# 初始化质心
self.centroids = self._initialize_centroids(X)
# 迭代优化
for iteration in range(self.max_iters):
# 分配样本到簇
self.labels = self._assign_clusters(X, self.centroids)
# 更新质心
new_centroids = self._update_centroids(X, self.labels)
# 检查收敛
centroid_shift = np.linalg.norm(new_centroids - self.centroids, axis=1).max()
if centroid_shift < self.tol:
print(f"收敛于第 {iteration + 1} 次迭代")
break
self.centroids = new_centroids
# 计算最终的簇内平方和
self.inertia_ = self._calculate_inertia(X, self.labels, self.centroids)
return self
def predict(self, X):
"""预测新样本的簇标签"""
return self._assign_clusters(X, self.centroids)
def fit_predict(self, X):
"""训练模型并返回预测标签"""
self.fit(X)
return self.labels
# 测试和可视化
def test_kmeans():
# 生成测试数据
X, y_true = make_blobs(n_samples=300, centers=4, n_features=2,
random_state=42, cluster_std=0.60)
# 创建并训练KMeans模型
kmeans = KMeans(k=4, random_state=42)
labels = kmeans.fit_predict(X)
# 可视化结果
plt.figure(figsize=(15, 5))
# 原始数据
plt.subplot(1, 3, 1)
plt.scatter(X[:, 0], X[:, 1], c=y_true, cmap='viridis', s=50, alpha=0.7)
plt.title('原始数据')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
# KMeans聚类结果
plt.subplot(1, 3, 2)
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=50, alpha=0.7)
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1],
c='red', marker='X', s=200, label='质心')
plt.title('KMeans聚类结果')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
plt.legend()
# 对比真实标签和预测标签
plt.subplot(1, 3, 3)
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=50, alpha=0.7)
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1],
c='red', marker='X', s=200, label='质心')
    # 标记分类错误的点(注意:KMeans 的簇编号是任意排列,与 y_true 的标签不保证
    # 一一对应,严格评估前应先做标签对齐,这里仅作直观示意)
    incorrect = labels != y_true
plt.scatter(X[incorrect, 0], X[incorrect, 1],
facecolors='none', edgecolors='black', s=100,
linewidth=2, label='分类错误')
plt.title('分类错误点标记')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
plt.legend()
plt.tight_layout()
plt.show()
# 打印模型信息
print(f"簇内平方和 (Inertia): {kmeans.inertia_:.4f}")
print(f"质心位置:\n{kmeans.centroids}")
# 肘部法则确定最佳K值
def elbow_method(X, max_k=10):
"""使用肘部法则确定最佳聚类数量"""
inertias = []
k_range = range(1, max_k + 1)
for k in k_range:
kmeans = KMeans(k=k, random_state=42)
kmeans.fit(X)
inertias.append(kmeans.inertia_)
plt.figure(figsize=(10, 6))
plt.plot(k_range, inertias, 'bo-')
plt.xlabel('聚类数量 K')
plt.ylabel('簇内平方和 (Inertia)')
plt.title('肘部法则 - 确定最佳K值')
plt.grid(True)
plt.show()
if __name__ == "__main__":
# 生成测试数据
X, _ = make_blobs(n_samples=300, centers=4, n_features=2,
random_state=42, cluster_std=0.60)
# 测试KMeans算法
test_kmeans()
# 使用肘部法则
elbow_method(X)
|
2301_80822435/machine-learning-course
|
assignment3/1班31.py
|
Python
|
mit
| 6,294
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
class KMeans:
def __init__(self, k=3, max_iters=100, tol=1e-4):
self.k = k
self.max_iters = max_iters
self.tol = tol
self.centroids = None
self.labels = None
def fit(self, X):
"""训练KMeans模型"""
n_samples, n_features = X.shape
# 随机初始化中心点
indices = np.random.choice(n_samples, self.k, replace=False)
self.centroids = X[indices]
for _ in range(self.max_iters):
# 分配样本到最近的中心点
distances = self._compute_distances(X)
self.labels = np.argmin(distances, axis=1)
# 更新中心点
new_centroids = np.array([X[self.labels == i].mean(axis=0) for i in range(self.k)])
# 检查收敛
if np.linalg.norm(new_centroids - self.centroids) < self.tol:
break
self.centroids = new_centroids
return self
def _compute_distances(self, X):
"""计算样本到所有中心点的距离"""
distances = np.zeros((X.shape[0], self.k))
for i, centroid in enumerate(self.centroids):
distances[:, i] = np.linalg.norm(X - centroid, axis=1)
return distances
def predict(self, X):
"""预测样本所属聚类"""
distances = self._compute_distances(X)
return np.argmin(distances, axis=1)
# 测试示例
if __name__ == "__main__":
# 生成测试数据
X, _ = make_blobs(n_samples=300, centers=3, n_features=2, random_state=42)
# 训练KMeans
kmeans = KMeans(k=3)
kmeans.fit(X)
labels = kmeans.labels
# 可视化结果
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.scatter(X[:, 0], X[:, 1], c='gray', alpha=0.6)
plt.title("原始数据")
plt.subplot(122)
for i in range(3):
cluster_points = X[labels == i]
plt.scatter(cluster_points[:, 0], cluster_points[:, 1], label=f'Cluster {i}')
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1],
marker='x', s=200, linewidths=3, color='black', label='Centroids')
plt.title("KMeans聚类结果")
plt.legend()
plt.tight_layout()
plt.show()
print("聚类中心坐标:")
print(kmeans.centroids)
|
2301_80822435/machine-learning-course
|
assignment3/1班32.py
|
Python
|
mit
| 2,467
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
class KMeans:
def __init__(self, k=3, max_iters=100, random_state=42):
self.k = k
self.max_iters = max_iters
self.random_state = random_state
self.centroids = None
self.labels = None
def initialize_centroids(self, X):
"""随机初始化质心"""
np.random.seed(self.random_state)
random_idx = np.random.permutation(X.shape[0])
centroids = X[random_idx[:self.k]]
return centroids
def compute_distance(self, X, centroids):
"""计算每个点到质心的距离"""
distances = np.zeros((X.shape[0], self.k))
for i, centroid in enumerate(centroids):
distances[:, i] = np.linalg.norm(X - centroid, axis=1)
return distances
def find_closest_centroid(self, distances):
"""找到最近的质心"""
return np.argmin(distances, axis=1)
def compute_centroids(self, X, labels):
"""重新计算质心"""
centroids = np.zeros((self.k, X.shape[1]))
for i in range(self.k):
centroids[i] = np.mean(X[labels == i], axis=0)
return centroids
def fit(self, X):
"""训练K-means模型"""
# 初始化质心
self.centroids = self.initialize_centroids(X)
for _ in range(self.max_iters):
# 计算距离
distances = self.compute_distance(X, self.centroids)
# 分配标签
self.labels = self.find_closest_centroid(distances)
# 重新计算质心
new_centroids = self.compute_centroids(X, self.labels)
# 检查收敛
if np.allclose(self.centroids, new_centroids):
break
self.centroids = new_centroids
return self
def predict(self, X):
"""预测新数据的类别"""
distances = self.compute_distance(X, self.centroids)
return self.find_closest_centroid(distances)
# 测试K-means算法
def test_kmeans():
# 生成测试数据
X, y_true = make_blobs(n_samples=300, centers=3,
cluster_std=0.60, random_state=0)
# 应用K-means
kmeans = KMeans(k=3)
kmeans.fit(X)
y_pred = kmeans.labels
# 可视化结果
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y_true, cmap='viridis')
plt.title('真实标签')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap='viridis')
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1],
marker='x', s=200, linewidths=3, color='red')
plt.title('K-means聚类结果')
plt.colorbar()
plt.tight_layout()
plt.show()
return kmeans
# 运行K-means测试
kmeans_model = test_kmeans()
|
2301_80822435/machine-learning-course
|
assignment3/1班34.py
|
Python
|
mit
| 3,002
|
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def assign_cluster(x, c):
distances = np.linalg.norm(x[:, np.newaxis] - c, axis=2)
return np.argmin(distances, axis=1)
def Kmeans(data, k, epsilon=1e-4, iteration=100):
n_samples, n_features = data.shape
np.random.seed(0)
centroids = data[np.random.choice(n_samples, k, replace=False)]
for i in range(iteration):
labels = assign_cluster(data, centroids)
new_centroids = np.array([
data[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
for j in range(k)
])
shift = np.linalg.norm(new_centroids - centroids)
if shift < epsilon:
print(f"K-means算法在第 {i + 1} 次迭代后收敛 (质心移动距离: {shift:.6f})")
break
centroids = new_centroids
return labels, centroids
if __name__ == "__main__":
np.random.seed(42)
data = np.vstack([
np.random.randn(50, 2) + np.array([0, 0]),
np.random.randn(50, 2) + np.array([5, 5]),
np.random.randn(50, 2) + np.array([10, 0])
])
Y, C = Kmeans(data, k=3)
print("聚类结果标签:", Y)
print("聚类中心:\n", C)
plt.scatter(data[:, 0], data[:, 1], c=Y, cmap='viridis', s=30)
plt.scatter(C[:, 0], C[:, 1], c='red', marker='X', s=200, label='Centroids')
plt.legend()
plt.title("K-Means 聚类结果")
plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/1班73.py
|
Python
|
mit
| 1,515
|
import math
import random
def assign_cluster(x, c):
min_dist = float('inf')
cluster_idx = 0
for i, center in enumerate(c):
dist_sq = 0.0
for xi, ci in zip(x, center):
dist_sq += (xi - ci) ** 2
dist = math.sqrt(dist_sq)
        if dist < min_dist:
            min_dist = dist
            cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon=1e-3, iteration=100):
if len(data) == 0:
raise ValueError("数据集不能为空")
if k < 1 or k > len(data):
raise ValueError(f"k值必须在1到{len(data)}之间")
dim = len(data[0])
for x in data:
if len(x) != dim:
raise ValueError("所有数据点必须具有相同的维度")
data_indices = list(range(len(data)))
random.shuffle(data_indices)
centers = [data[i].copy() for i in data_indices[:k]]
iter_count = 0
while iter_count < iteration:
clusters = [[] for _ in range(k)]
for x in data:
cluster_idx = assign_cluster(x, centers)
clusters[cluster_idx].append(x)
new_centers = []
for cluster in clusters:
if not cluster:
new_center = random.choice(data).copy()
else:
new_center = []
for d in range(dim):
avg = sum(x[d] for x in cluster) / len(cluster)
new_center.append(avg)
new_centers.append(new_center)
converge = True
for old_c, new_c in zip(centers, new_centers):
dist_sq = 0.0
for oc, nc in zip(old_c, new_c):
dist_sq += (oc - nc) ** 2
dist = math.sqrt(dist_sq)
if dist > epsilon:
converge = False
break
centers = new_centers
iter_count += 1
if converge:
print(f"迭代{iter_count}次后收敛")
break
if not converge:
print(f"达到最大迭代次数{iteration},未完全收敛")
return clusters, centers, iter_count
if __name__ == "__main__":
random.seed(42)
test_data = []
for _ in range(10):
x = 2 + random.gauss(0, 0.5)
y = 3 + random.gauss(0, 0.5)
test_data.append([x, y])
for _ in range(10):
x = 8 + random.gauss(0, 0.5)
y = 7 + random.gauss(0, 0.5)
test_data.append([x, y])
for _ in range(10):
x = 5 + random.gauss(0, 0.5)
y = 10 + random.gauss(0, 0.5)
test_data.append([x, y])
k = 3
epsilon = 1e-4
max_iter = 50
clusters, final_centers, iter_num = Kmeans(test_data, k, epsilon, max_iter)
print(f"\n最终聚类中心:")
for i, center in enumerate(final_centers):
print(f"聚类{i + 1}中心:{[round(c, 4) for c in center]}")
print(f"\n每个聚类的数据点数量:")
for i, cluster in enumerate(clusters):
print(f"聚类{i + 1}:{len(cluster)}个点")
|
2301_80822435/machine-learning-course
|
assignment3/2班37.py
|
Python
|
mit
| 2,999
|
import random
import math
def euclidean_distance(p1, p2):
if len(p1) != len(p2):
raise ValueError("Points must have the same number of dimensions")
sum_squares = 0
for i in range(len(p1)):
sum_squares += (p1[i] - p2[i]) ** 2
return math.sqrt(sum_squares)
def assign_cluster(x, c):
min_dist = float('inf')
min_index = -1
for i, centroid in enumerate(c):
dist = euclidean_distance(x, centroid)
if dist < min_dist:
min_dist = dist
min_index = i
return min_index
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
if k > len(data):
raise ValueError("K cannot be greater than the number of data points.")
initial_indices = random.sample(range(len(data)), k)
centroids = [list(data[i]) for i in initial_indices]
loss_history = []
print(f"初始质心: {centroids}")
for i in range(max_iterations):
clusters = [-1] * len(data)
for point_idx, point in enumerate(data):
cluster_idx = assign_cluster(point, centroids)
clusters[point_idx] = cluster_idx
loss = 0.0
for point_idx, point in enumerate(data):
cluster_idx = clusters[point_idx]
centroid = centroids[cluster_idx]
loss += euclidean_distance(point, centroid) ** 2 # 使用平方距离作为损失
loss_history.append(loss)
new_centroids = []
for j in range(k):
cluster_points = [data[p_idx] for p_idx in range(len(data)) if clusters[p_idx] == j]
if not cluster_points:
new_centroids.append(centroids[j])
continue
num_points = len(cluster_points)
num_dimensions = len(cluster_points[0])
new_centroid = [0.0] * num_dimensions
for dim in range(num_dimensions):
sum_dim = sum(point[dim] for point in cluster_points)
new_centroid[dim] = sum_dim / num_points
new_centroids.append(new_centroid)
total_shift = sum(euclidean_distance(centroids[j], new_centroids[j]) for j in range(k))
print(f"--- 迭代 {i + 1} ---")
print(f"新质心: {new_centroids}")
print(f"损失: {loss:.4f}, 质心总位移: {total_shift:.6f}")
if total_shift < epsilon:
print(f"\n算法在 {i + 1} 次迭代后收敛。")
return new_centroids, clusters, loss_history
centroids = new_centroids
print(f"\n在 {max_iterations} 次迭代后未达到收敛阈值。")
return centroids, clusters, loss_history
if __name__ == "__main__":
dataset = [
[1.0, 2.0], [1.2, 1.8], [0.8, 2.2], [1.1, 1.9],
[5.0, 8.0], [5.2, 8.1], [4.8, 7.9], [5.1, 8.2],
[9.0, 2.0], [9.2, 1.8], [8.8, 2.2], [9.1, 1.9],
[5.0, 5.0], [5.2, 4.8], [4.8, 5.2], [5.1, 4.9], [4.9, 5.1]
]
print(f'数据集大小: {len(dataset)}')
K = 4
EPSILON = 1e-4
MAX_ITER = 100
random.seed(0)
final_centroids, final_clusters, loss_history = Kmeans(dataset, K, EPSILON, MAX_ITER)
print("\n==================== 最终结果 ====================")
print("最终质心:")
for i, centroid in enumerate(final_centroids):
print(f" 簇 {i + 1}: ({centroid[0]:.4f}, {centroid[1]:.4f})")
print("\n聚类分配 (数据点索引 -> 簇索引):")
for i, cluster_idx in enumerate(final_clusters):
print(f" 点 {i} -> 簇 {cluster_idx}")
print("\n损失函数历史:")
for i, loss in enumerate(loss_history):
print(f" 迭代 {i + 1}: {loss:.4f}")
|
2301_80822435/machine-learning-course
|
assignment3/2班39.py
|
Python
|
mit
| 3,654
|
import math
import random
def assign_cluster(x, c):
distances = []
for center in c:
dist = math.sqrt(sum((x[i] - center[i])**2 for i in range(len(x))))
distances.append(dist)
return distances.index(min(distances))
def Kmeans(data, k, epsilon=1e-4, iteration=100):
if not data or k <= 0 or k > len(data):
raise ValueError("Invalid input parameters")
n_features = len(data[0])
centroids = random.sample(data, k)
for iter_count in range(iteration):
clusters = {i: [] for i in range(k)}
for point in data:
cluster_idx = assign_cluster(point, centroids)
clusters[cluster_idx].append(point)
new_centroids = []
for i in range(k):
if clusters[i]:
centroid = []
for j in range(n_features):
dim_sum = sum(point[j] for point in clusters[i])
centroid.append(dim_sum / len(clusters[i]))
new_centroids.append(centroid)
else:
new_centroids.append(centroids[i])
max_change = 0
for i in range(k):
change = math.sqrt(sum((centroids[i][j] - new_centroids[i][j])**2 for j in range(n_features)))
max_change = max(max_change, change)
centroids = new_centroids
if max_change < epsilon:
print(f"Converged after {iter_count + 1} iterations")
break
return clusters, centroids
if __name__ == "__main__":
random.seed(42)
sample_data = []
for center in [[2, 2], [8, 8], [8, 2]]:
for _ in range(10):
sample_data.append([
center[0] + random.gauss(0, 1.5),
center[1] + random.gauss(0, 1.5)
])
clusters, centroids = Kmeans(sample_data, k=3, epsilon=1e-4, iteration=100)
print("聚类中心:")
for i, centroid in enumerate(centroids):
print(f"聚类 {i}: ({centroid[0]:.2f}, {centroid[1]:.2f})")
print("\n聚类结果:")
for cluster_idx, points in clusters.items():
print(f"聚类 {cluster_idx}: {len(points)} 个点")
for point in points:
print(f" ({point[0]:.2f}, {point[1]:.2f})")
|
2301_80822435/machine-learning-course
|
assignment3/2班40.py
|
Python
|
mit
| 2,271
|
import math
import random
def assign_cluster(x, centers):
min_dist = float('inf')
cluster_idx = 0
for i, c in enumerate(centers):
# 计算欧氏距离
dist = math.sqrt(sum([(a - b) ** 2 for a, b in zip(x, c)]))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon=1e-4, iteration=100):
if k <= 0 or k > len(data):
raise ValueError("k值必须为正且不大于样本数量")
if len(data) == 0:
raise ValueError("数据集不能为空")
# 初始化聚类中心
n_samples = len(data)
n_features = len(data[0])
centers_idx = random.sample(range(n_samples), k)
centers = [data[i] for i in centers_idx]
for _ in range(iteration):
# 分配每个样本到最近的聚类中心
clusters = [[] for _ in range(k)] # 存储每个聚类的样本索引
for i, x in enumerate(data):
c_idx = assign_cluster(x, centers)
clusters[c_idx].append(i)
# 计算新的聚类中心
new_centers = []
for cluster in clusters:
if not cluster: # 防止空聚类
new_center = data[random.randint(0, n_samples - 1)]
else:
# 计算每个特征的均值
new_center = []
for j in range(n_features):
mean_val = sum([data[i][j] for i in cluster]) / len(cluster)
new_center.append(mean_val)
new_centers.append(new_center)
# 检查是否收敛(所有中心变化都小于epsilon)
center_changes = [
math.sqrt(sum([(a - b) ** 2 for a, b in zip(old, new)]))
for old, new in zip(centers, new_centers)
]
if max(center_changes) < epsilon:
centers = new_centers
break
centers = new_centers
return clusters, centers
#测试数据
if __name__ == "__main__":
data = [
[1, 2], [1, 4], [1, 0],
[10, 2], [10, 4], [10, 0],
[5, 5], [6, 6], [7, 7]
]
# 聚类(k=3)
clusters, centers = Kmeans(data, k=3, epsilon=1e-3, iteration=50)
# 输出结果
print("聚类中心:")
for i, center in enumerate(centers):
print(f"中心 {i + 1}: {center}")
print("\n聚类结果:")
for i, cluster in enumerate(clusters):
print(f"聚类 {i + 1} 的样本:{[data[idx] for idx in cluster]}")
|
2301_80822435/machine-learning-course
|
assignment3/2班41.py
|
Python
|
mit
| 2,479
|
import math
import random
def Kmeans(data, k, epsilon=1e-5, iteration=100):
"""
K均值聚类主函数
:param data: 输入数据,格式为列表嵌套列表,每个子列表是一个样本向量(如[[x1,y1], [x2,y2], ...])
:param k: 聚类数量
:param epsilon: 收敛阈值(centroids变化量小于该值则停止迭代)
:param iteration: 最大迭代次数(防止无限循环)
:return: (clusters, centroids)
clusters: 聚类结果,列表,每个元素是对应样本的聚类标签(0~k-1)
centroids: 最终聚类中心,列表嵌套列表
"""
# 输入合法性检查
if not data or k <= 0 or k > len(data):
raise ValueError("数据不能为空,且k需满足 0 < k ≤ 样本数量")
if not all(isinstance(sample, (list, tuple)) for sample in data):
raise TypeError("每个样本必须是列表或元组类型的向量")
sample_dim = len(data[0])
if not all(len(sample) == sample_dim for sample in data):
raise ValueError("所有样本必须具有相同的维度")
# 1. 初始化聚类中心(随机选择k个不同的样本作为初始centroids)
centroids = random.sample(data, k)
# 转换为列表类型(确保可修改),并保留浮点数精度
centroids = [list(map(float, centroid)) for centroid in centroids]
iter_count = 0
while iter_count < iteration:
# 2. 分配聚类标签:为每个样本分配到最近的centroid
clusters = [assign_cluster(sample, centroids) for sample in data]
# 3. 更新聚类中心:计算每个聚类的均值作为新centroid
new_centroids = []
for cluster_id in range(k):
# 收集当前聚类的所有样本
cluster_samples = [data[i] for i in range(len(data)) if clusters[i] == cluster_id]
if not cluster_samples: # 防止空聚类(极端情况)
new_centroids.append(centroids[cluster_id]) # 保留原中心
continue
# 计算每个维度的均值
dim_means = []
for dim in range(sample_dim):
dim_sum = sum(sample[dim] for sample in cluster_samples)
dim_means.append(dim_sum / len(cluster_samples))
new_centroids.append(dim_means)
# 4. 检查收敛:计算所有centroid的变化量(欧氏距离)
centroid_changes = []
for old_c, new_c in zip(centroids, new_centroids):
dist = math.sqrt(sum((old - new)**2 for old, new in zip(old_c, new_c)))
centroid_changes.append(dist)
# 若所有中心变化量均小于epsilon,收敛并退出
if max(centroid_changes) < epsilon:
break
# 否则更新centroids,继续迭代
centroids = new_centroids
iter_count += 1
return clusters, centroids
def assign_cluster(x, c):
"""
为单个样本x分配到最近的聚类中心
:param x: 单个样本向量(列表/元组)
:param c: 聚类中心列表(列表嵌套列表)
:return: 最近聚类中心的索引(int,0~len(c)-1)
"""
min_dist = float('inf')
best_cluster = 0
for cluster_id, centroid in enumerate(c):
# 计算欧氏距离:sqrt(sum((x_i - c_i)^2))
dist = math.sqrt(sum((xi - ci)**2 for xi, ci in zip(x, centroid)))
        if dist < min_dist:
            min_dist = dist
            best_cluster = cluster_id
return best_cluster
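
# Usage sketch (hypothetical data): note the return order of Kmeans above,
# per-sample labels first, centroids second.
if __name__ == "__main__":
    random.seed(2024)
    demo = [[1.0, 1.0], [0.9, 1.2], [5.0, 5.0], [5.1, 4.8]]
    labels, centroids = Kmeans(demo, k=2)
    print("labels:", labels)
    print("centroids:", centroids)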
|
2301_80822435/machine-learning-course
|
assignment3/2班42.py
|
Python
|
mit
| 3,577
|
import math
import random
random.seed(42)
def assign_cluster(x, c):
min_dist = float('inf') # 初始化最小距离为无穷大
cluster_idx = 0 # 初始化聚类索引
for idx, center in enumerate(c):
dist = math.sqrt(sum((xi - ci) ** 2 for xi, ci in zip(x, center)))
if dist < min_dist:
min_dist = dist
cluster_idx = idx
return cluster_idx
def Kmeans(data, k, epsilon=1e-3, iteration=100):
if not isinstance(data, list) or len(data) == 0:
raise ValueError("输入样本集data必须是非空列表")
if not all(isinstance(sample, list) for sample in data):
raise ValueError("样本集中每个元素必须是特征列表")
sample_dim = len(data[0])
if not all(len(sample) == sample_dim for sample in data):
raise ValueError("所有样本必须具有相同的特征维度")
if not isinstance(k, int) or k <= 0:
raise ValueError("聚类数量k必须是正整数")
data_len = len(data)
if k >= data_len:
raise ValueError(f"聚类数量k({k})不能大于等于样本数量({data_len})")
# 步骤1:初始化聚类中心(从数据集中随机选k个不同样本)
center_indices = random.sample(range(data_len), k)
centers = [data[idx].copy() for idx in center_indices]
# 步骤2:迭代更新聚类中心与样本分配
for iter_num in range(iteration):
cluster_labels = [assign_cluster(sample, centers) for sample in data]
new_centers = []
for cluster_idx in range(k):
cluster_samples = [
data[sample_idx] for sample_idx in range(data_len)
if cluster_labels[sample_idx] == cluster_idx
]
if not cluster_samples:
# 从有样本的聚类中随机选一个样本作为新中心
valid_samples = [data[i] for i in range(data_len) if cluster_labels[i] != cluster_idx]
new_center = valid_samples[random.randint(0, len(valid_samples) - 1)].copy()
else:
# 按特征维度求均值(适配任意特征维度)
new_center = [
sum(sample[dim] for sample in cluster_samples) / len(cluster_samples)
for dim in range(sample_dim)
]
new_centers.append(new_center)
center_changes = []
for new_c, old_c in zip(new_centers, centers):
# 计算单个中心的欧氏变化量
change = math.sqrt(sum((new_c[dim] - old_c[dim]) ** 2 for dim in range(sample_dim)))
center_changes.append(change)
        if max(center_changes) < epsilon:
            centers = new_centers  # adopt the converged centers before exiting
            print(f"迭代{iter_num + 1}次后收敛")
            break
# 未收敛则更新中心,进入下一轮迭代
centers = new_centers
else:
print(f"达到最大迭代次数{iteration},未完全收敛(最大中心变化量:{max(center_changes):.6f})")
# 步骤3:返回最终结果(聚类中心+样本标签)
final_labels = [assign_cluster(sample, centers) for sample in data]
return centers, final_labels
if __name__ == "__main__":
test_data = [
[1, 2], [2, 1], [1, 1], [2, 2],
[5, 6], [6, 5], [5, 5], [6, 6],
[9, 10], [10, 9], [9, 9], [10, 10]
]
try:
final_centers, final_labels = Kmeans(test_data, k=3)
print("最终聚类中心:")
for idx, center in enumerate(final_centers, 1):
print(f" 聚类{idx}:{[round(val, 4) for val in center]}")
print("各样本聚类标签:")
print(f" {final_labels}")
except ValueError as e:
print(f"运行出错:{e}")
|
2301_80822435/machine-learning-course
|
assignment3/2班43.py
|
Python
|
mit
| 3,694
|
import numpy as np
class KMeans:
def __init__(self, k=3, max_iters=100, tol=1e-4):
self.k = k
self.max_iters = max_iters
self.tol = tol
def fit(self, X):
np.random.seed(42)
# Step 1:随机选择 k 个点作为初始聚类中心
random_idx = np.random.choice(len(X), self.k, replace=False)
self.centroids = X[random_idx]
for i in range(self.max_iters):
# Step 2:分配样本到最近的聚类中心
labels = self._assign_clusters(X)
# Step 3:计算新的聚类中心
            # Step 3:计算新的聚类中心(空簇时保留原中心,避免对空切片取均值得到 NaN)
            new_centroids = np.array([
                X[labels == j].mean(axis=0) if np.any(labels == j) else self.centroids[j]
                for j in range(self.k)
            ])
# Step 4:判断收敛(中心点移动是否足够小)
if np.all(np.abs(new_centroids - self.centroids) < self.tol):
break
self.centroids = new_centroids
self.labels_ = labels
def _assign_clusters(self, X):
distances = np.linalg.norm(X[:, np.newaxis] - self.centroids, axis=2)
return np.argmin(distances, axis=1)
def predict(self, X):
return self._assign_clusters(X)
if __name__ == "__main__":
    # 构造三类样本
X1 = np.random.randn(100, 2) + np.array([2, 2])
X2 = np.random.randn(100, 2) + np.array([-2, -2])
X3 = np.random.randn(100, 2) + np.array([5, -3])
X = np.vstack((X1, X2, X3))
# 聚类
kmeans = KMeans(k=3)
kmeans.fit(X)
print("聚类中心:")
print(kmeans.centroids)
print("前 10 个样本的类别:")
print(kmeans.labels_[:10])
|
2301_80822435/machine-learning-course
|
assignment3/2班45号.py
|
Python
|
mit
| 1,563
|
import random
def assign_cluster(x, centers):
min_dist_sq = float('inf')
cluster_idx = 0 # 最近聚类中心的索引
for i, center in enumerate(centers):
# 计算欧氏距离的平方(避免开方运算,提高效率)
dist_sq = sum((xi - ci) ** 2 for xi, ci in zip(x, center))
# 更新最小距离和对应索引
if dist_sq < min_dist_sq:
min_dist_sq = dist_sq
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon, iteration):
# 输入合法性检查
if not data:
raise ValueError("样本集不能为空")
if k <= 0 or k > len(data):
raise ValueError("k必须为正数且不大于样本数量")
# 检查所有样本维度是否一致
n_features = len(data[0])
for x in data:
if len(x) != n_features:
raise ValueError("所有样本必须具有相同的维度")
# 1. 初始化聚类中心(从样本中随机选择k个不重复样本)
n_samples = len(data)
initial_indices = random.sample(range(n_samples), k)
centers = [data[i].copy() for i in initial_indices] # 复制避免修改原数据
# 2. 迭代聚类过程
for _ in range(iteration):
# 2.1 为所有样本分配聚类标签
labels = [assign_cluster(x, centers) for x in data]
# 2.2 计算新的聚类中心
new_centers = []
for i in range(k):
# 收集第i个聚类的所有样本
cluster_samples = [data[j] for j in range(n_samples) if labels[j] == i]
# 处理空聚类(若该聚类无样本,随机选择一个样本作为新中心)
if not cluster_samples:
new_center = random.choice(data).copy()
else:
# 计算每个维度的均值作为新中心
new_center = []
for d in range(n_features):
dimension_values = [s[d] for s in cluster_samples]
new_center.append(sum(dimension_values) / len(dimension_values))
new_centers.append(new_center)
# 2.3 检查是否收敛(计算中心最大变化量)
max_change = 0.0
for old, new in zip(centers, new_centers):
# 计算欧氏距离
dist = sum((o - n) ** 2 for o, n in zip(old, new)) ** 0.5
if dist > max_change:
max_change = dist
# 若中心变化小于阈值,提前停止迭代
if max_change < epsilon:
centers = new_centers
break
# 更新聚类中心
centers = new_centers
# 计算最终的聚类标签
final_labels = [assign_cluster(x, centers) for x in data]
return centers, final_labels
# 测试数据(二维样本)
data = [
[1, 2], [1, 3], [2, 1], [2, 3], # 第一类
[5, 6], [6, 5], [6, 7], [7, 6], # 第二类
[10, 11], [11, 10], [11, 12], [12, 11] # 第三类
]
# 聚类(3个簇,收敛阈值0.001,最大迭代100次)
centers, labels = Kmeans(data, k=3, epsilon=1e-3, iteration=100)
print("最终聚类中心:")
for i, center in enumerate(centers):
print(f"簇{i+1}:{center}")
print("\n样本聚类标签:")
for i, label in enumerate(labels):
print(f"样本{data[i]} → 簇{label+1}")
|
2301_80822435/machine-learning-course
|
assignment3/2班46.py
|
Python
|
mit
| 3,380
|
import math
import random
from collections import defaultdict
def euclidean_distance(point1, point2):
"""
计算两个点之间的欧几里得距离
"""
if len(point1) != len(point2):
raise ValueError("Points must have the same dimensions")
squared_distance = 0
for i in range(len(point1)):
squared_distance += (point1[i] - point2[i]) ** 2
return math.sqrt(squared_distance)
def assign_cluster(x, centroids):
"""
将数据点x分配到最近的质心
"""
min_distance = float('inf')
cluster_index = -1
for i, centroid in enumerate(centroids):
distance = euclidean_distance(x, centroid)
if distance < min_distance:
min_distance = distance
cluster_index = i
return cluster_index
def calculate_centroids(data, cluster_assignments, k):
"""
根据当前聚类分配计算新的质心
"""
# 按聚类分组数据点
clusters = defaultdict(list)
for i, cluster_id in enumerate(cluster_assignments):
clusters[cluster_id].append(data[i])
new_centroids = []
for cluster_id in range(k):
if cluster_id in clusters and len(clusters[cluster_id]) > 0:
# 计算该聚类中所有点的均值作为新质心
cluster_points = clusters[cluster_id]
dimensions = len(cluster_points[0])
new_centroid = []
for dim in range(dimensions):
dim_sum = sum(point[dim] for point in cluster_points)
dim_avg = dim_sum / len(cluster_points)
new_centroid.append(dim_avg)
new_centroids.append(new_centroid)
else:
# 如果某个聚类没有数据点,随机选择一个数据点作为质心
new_centroids.append(random.choice(data))
return new_centroids
def has_converged(old_centroids, new_centroids, epsilon):
"""
检查质心是否收敛(变化小于epsilon)
"""
for old_centroid, new_centroid in zip(old_centroids, new_centroids):
distance = euclidean_distance(old_centroid, new_centroid)
if distance > epsilon:
return False
return True
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
"""
K均值聚类算法
参数:
data: 数据集,每个元素是一个数据点(列表或元组)
k: 聚类数量
epsilon: 收敛阈值
max_iterations: 最大迭代次数
返回:
centroids: 最终质心列表
cluster_assignments: 每个数据点的聚类分配
"""
if len(data) < k:
raise ValueError("Number of data points must be at least k")
# 1. 随机初始化质心
centroids = random.sample(data, k)
# 存储每个数据点的聚类分配
cluster_assignments = [0] * len(data)
iteration = 0
converged = False
while not converged and iteration < max_iterations:
# 2. 分配每个数据点到最近的质心
for i, point in enumerate(data):
cluster_assignments[i] = assign_cluster(point, centroids)
# 3. 计算新的质心
new_centroids = calculate_centroids(data, cluster_assignments, k)
# 4. 检查是否收敛
converged = has_converged(centroids, new_centroids, epsilon)
# 更新质心
centroids = new_centroids
iteration += 1
print(f"Iteration {iteration}: converged = {converged}")
return centroids, cluster_assignments
# 测试代码
if __name__ == "__main__":
# 创建测试数据
# 三个明显的聚类
test_data = [
[1, 1], [1, 2], [2, 1], [2, 2], # 聚类1
[8, 8], [8, 9], [9, 8], [9, 9], # 聚类2
[1, 8], [2, 8], [1, 9], [2, 9] # 聚类3
]
print("测试数据:")
for i, point in enumerate(test_data):
print(f"Point {i}: {point}")
print("\n运行K-means聚类...")
centroids, assignments = Kmeans(test_data, k=3, epsilon=0.001, max_iterations=100)
print("\n最终结果:")
print("质心:")
for i, centroid in enumerate(centroids):
print(f"Cluster {i}: {centroid}")
print("\n聚类分配:")
clusters = defaultdict(list)
for i, (point, cluster_id) in enumerate(zip(test_data, assignments)):
clusters[cluster_id].append(point)
print(f"Point {point} -> Cluster {cluster_id}")
print("\n按聚类分组:")
for cluster_id, points in clusters.items():
print(f"Cluster {cluster_id}: {points}")
|
2301_80822435/machine-learning-course
|
assignment3/2班47.py
|
Python
|
mit
| 4,607
|
import random # 导入随机模块
import math # 导入数学模块
def assign_cluster(x, c):
min_dist = float('inf') # 初始化最小距离为无穷大
best_idx = 0 # 初始化最佳质心索引
for i, center in enumerate(c): # 遍历所有质心
dist = math.sqrt(sum((a - b) ** 2 for a, b in zip(x, center))) # 计算欧几里得距离
if dist < min_dist: # 如果找到更近的质心
min_dist = dist # 更新最小距离
best_idx = i # 更新最佳索引
return best_idx # 返回最近质心的索引
def Kmeans(data, k, epsilon=0.001, max_iter=100):
"""
K均值聚类主函数
"""
centers = random.sample(data, k) # 随机选择k个初始质心
for it in range(max_iter): # 开始迭代
labels = [assign_cluster(p, centers) for p in data] # 为每个点分配聚类标签
new_centers = [] # 创建新质心列表
for i in range(k): # 遍历每个聚类
points = [data[j] for j, label in enumerate(labels) if label == i] # 获取属于当前聚类的所有点
if points: # 如果聚类不为空
new_center = [sum(dim) / len(points) for dim in zip(*points)] # 计算各维度均值作为新质心
new_centers.append(new_center) # 添加到新质心列表
else: # 如果聚类为空
new_centers.append(random.choice(data)) # 随机选择一个点作为质心
max_move = max(math.sqrt(sum((a - b) ** 2 for a, b in zip(old, new))) # 计算质心移动的最大距离
for old, new in zip(centers, new_centers))
centers = new_centers # 更新质心
if max_move < epsilon: # 检查是否收敛
break # 如果收敛则提前结束
return centers, labels # 返回最终质心和标签
if __name__ == "__main__":
data = [[1, 1], [1, 2], [2, 1], [2, 2], [8, 8], [8, 9], [9, 8], [9, 9]] # 创建测试数据
centers, labels = Kmeans(data, k=2) # 运行K均值聚类
print("最终质心:", centers) # 打印质心
print("各点标签:", labels) # 打印标签
|
2301_80822435/machine-learning-course
|
assignment3/2班49.py
|
Python
|
mit
| 2,229
|
import random
import math
def assign_cluster(x, c):
"""
将数据点x分配到最近的聚类中心
参数:
x: 数据点(列表或元组)
c: 聚类中心列表
返回:
最近的聚类中心的索引
"""
min_dist = float('inf')
cluster_idx = 0
for i, center in enumerate(c):
# 计算欧氏距离
dist = math.sqrt(sum((x[j] - center[j]) ** 2 for j in range(len(x))))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon, iteration):
"""
K均值聚类算法
参数:
data: 数据点列表,每个数据点是一个列表或元组
k: 聚类数量
epsilon: 收敛阈值
iteration: 最大迭代次数
返回:
clusters: 每个数据点所属的聚类索引列表
centers: 最终的聚类中心列表
"""
    if len(data) == 0 or k <= 0:
        return [], []
    k = min(k, len(data))  # clamp k so the update loop below never indexes past the chosen centers
    # 初始化聚类中心(随机选择k个数据点)
    centers = [list(data[i]) for i in random.sample(range(len(data)), k)]
for _ in range(iteration):
# 分配每个数据点到最近的聚类中心
clusters = [assign_cluster(x, centers) for x in data]
# 计算新的聚类中心
new_centers = []
for i in range(k):
# 找到属于第i个聚类的所有点
cluster_points = [data[j] for j in range(len(data)) if clusters[j] == i]
if len(cluster_points) > 0:
# 计算均值作为新中心
new_center = [sum(point[d] for point in cluster_points) / len(cluster_points)
for d in range(len(data[0]))]
new_centers.append(new_center)
else:
# 如果某个聚类没有点,保持原中心
new_centers.append(centers[i])
# 检查是否收敛
max_change = max(math.sqrt(sum((centers[i][d] - new_centers[i][d]) ** 2
for d in range(len(centers[i]))))
for i in range(k))
if max_change < epsilon:
break
centers = new_centers
return clusters, centers
# 测试示例
if __name__ == "__main__":
# 示例数据
data = [
[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11],
[8, 2], [10, 2], [9, 3], [2, 1], [5, 5], [6, 6]
]
k = 3
epsilon = 0.01
max_iter = 100
clusters, centers = Kmeans(data, k, epsilon, max_iter)
print("聚类结果:")
for i, cluster_id in enumerate(clusters):
print(f"数据点 {data[i]} -> 聚类 {cluster_id}")
print("\n聚类中心:")
for i, center in enumerate(centers):
print(f"聚类 {i}: {center}")
|
2301_80822435/machine-learning-course
|
assignment3/2班50.py
|
Python
|
mit
| 2,920
|
"""
手动实现K均值聚类算法
只使用Python标准库
"""
import random
import math
def euclidean_distance(point1, point2):
"""
计算两个点之间的欧氏距离
参数:
point1: 第一个点(列表或元组)
point2: 第二个点(列表或元组)
返回:
欧氏距离
"""
if len(point1) != len(point2):
raise ValueError("两个点的维度必须相同")
sum_squared_diff = sum((a - b) ** 2 for a, b in zip(point1, point2))
return math.sqrt(sum_squared_diff)
def assign_cluster(x, c):
"""
将数据点x分配到最近的聚类中心c
参数:
x: 数据点(列表或元组)
c: 聚类中心列表,每个元素是一个聚类中心(列表或元组)
返回:
最近的聚类中心的索引
"""
if not c:
raise ValueError("聚类中心列表不能为空")
min_distance = float('inf')
closest_center_idx = 0
for idx, center in enumerate(c):
distance = euclidean_distance(x, center)
if distance < min_distance:
min_distance = distance
closest_center_idx = idx
return closest_center_idx
def Kmeans(data, k, epsilon, iteration):
"""
K均值聚类算法
"""
if not data:
raise ValueError("数据不能为空")
if k <= 0:
raise ValueError("聚类数量k必须大于0")
if k > len(data):
raise ValueError("聚类数量k不能大于数据点数量")
# 获取数据维度
dim = len(data[0])
# 1. 初始化聚类中心:随机选择k个数据点作为初始聚类中心
centers = [list(data[i]) for i in random.sample(range(len(data)), k)]
# 存储上一次的聚类中心,用于判断收敛
prev_centers = None
# 迭代
for iter_count in range(iteration):
# 2. 将每个数据点分配到最近的聚类中心
labels = [assign_cluster(x, centers) for x in data]
# 3. 更新聚类中心为每个簇的均值
new_centers = []
for center_idx in range(k):
# 找到属于当前簇的所有数据点
cluster_points = [data[i] for i in range(len(data)) if labels[i] == center_idx]
if not cluster_points:
# 如果某个簇没有数据点,保持原中心不变
new_centers.append(centers[center_idx][:])
else:
# 计算簇的均值作为新的聚类中心
new_center = []
for dim_idx in range(dim):
dim_sum = sum(point[dim_idx] for point in cluster_points)
new_center.append(dim_sum / len(cluster_points))
new_centers.append(new_center)
# 4. 检查是否收敛
if prev_centers is not None:
max_change = 0
for i in range(k):
change = euclidean_distance(prev_centers[i], new_centers[i])
if change > max_change:
max_change = change
if max_change < epsilon:
# 收敛,停止迭代
centers = new_centers
break
prev_centers = [center[:] for center in new_centers]
centers = new_centers
# 最后一次分配标签
labels = [assign_cluster(x, centers) for x in data]
return centers, labels
# 测试代码
if __name__ == "__main__":
# 示例数据:简单的2D数据点
test_data = [
[1.0, 1.0],
[1.5, 2.0],
[3.0, 4.0],
[5.0, 7.0],
[3.5, 5.0],
[4.5, 5.0],
[3.5, 4.5],
]
print("测试数据:")
for i, point in enumerate(test_data):
print(f" 点{i}: {point}")
print("\n运行K均值聚类 (k=2, epsilon=0.01, max_iterations=100)...")
centers, labels = Kmeans(test_data, k=2, epsilon=0.01, iteration=100)
print("\n聚类结果:")
print(f"聚类中心:")
for i, center in enumerate(centers):
print(f" 中心{i}: {center}")
print(f"\n数据点分配:")
for i, (point, label) in enumerate(zip(test_data, labels)):
print(f" 点{i} {point} -> 簇{label}")
|
2301_80822435/machine-learning-course
|
assignment3/2班51.py
|
Python
|
mit
| 4,303
|
import random
import math
from typing import List, Tuple # 从 typing 中导入类型提示,便于代码可读性
# 1. 计算欧氏距离的平方(避免不必要的 sqrt)
def euclidean_sq(a: List[float], b: List[float]) -> float:
# 对两个向量对应元素求差的平方并求和,得到距离的平方
return sum((x - y) ** 2 for x, y in zip(a, b))
# 2. 为单个样本分配最近的簇
def assign_cluster(x: List[float], centroids: List[List[float]]) -> int:
"""
返回样本 x 最近的质心索引
"""
distances = [euclidean_sq(x, c) for c in centroids]# 计算样本 x 与每个质心的距离平方,得到一个列表
return distances.index(min(distances))# 返回距离最小的质心对应的索引
# 3. 根据当前标签重新计算质心
def compute_centroids(
data: List[List[float]],
labels: List[int],
k: int
) -> List[List[float]]:
"""
data : 所有样本
labels : 每个样本对应的簇编号
k : 簇的数量
返回 : k 个新质心(列表形式)
"""
dim = len(data[0]) # 取特征维度(假设所有样本维度相同)
centroids = [[0.0] * dim for _ in range(k)]# 初始化 k 个质心,每个质心是一个全 0 的向量
counts = [0] * k# 用于统计每个簇中样本的数量
# 累加每个簇中所有样本的特征值
for x, label in zip(data, labels):
counts[label] += 1 # 该簇样本计数加一
for i in range(dim):
centroids[label][i] += x[i] # 对应维度的特征值累加
# 处理可能出现的空簇(即某个簇没有分配到任何样本)
for idx in range(k):
if counts[idx] == 0:
# 若空簇,随机挑选一个样本作为该簇的新质心,防止除以 0
centroids[idx] = random.choice(data)
else:
# 否则,用累加的特征和除以样本数,得到均值即新质心
centroids[idx] = [v / counts[idx] for v in centroids[idx]]
return centroids
# 4. K‑means 主函数
def kmeans(
data: List[List[float]],
k: int,
epsilon: float = 1e-4,
max_iter: int = 300
) -> Tuple[List[List[float]], List[int]]:
"""
data : 输入数据,形如 [[x1, x2, ...], [y1, y2, ...], ...]
k : 目标簇数
epsilon : 质心移动阈值,低于该值视为收敛
max_iter : 最大迭代次数
返回值 : (final_centroids, final_labels)
"""
# 参数合法性检查
if k <= 0:
raise ValueError("k 必须为正整数")
if k > len(data):
raise ValueError("k 不能大于样本数")
# 1) 随机初始化质心(从样本中随机抽取 k 个不重复的点)
centroids = random.sample(data, k)
# 迭代过程
for it in range(max_iter):
# 2) 为每个样本分配最近的簇
labels = [assign_cluster(x, centroids) for x in data]
# 3) 根据当前标签重新计算质心
new_centroids = compute_centroids(data, labels, k)
# 4) 检查收敛:计算所有质心位移的平方和的最大值
max_shift = max(euclidean_sq(c, nc) for c, nc in zip(centroids, new_centroids))
centroids = new_centroids # 更新质心为新质心
# 打印本次迭代的信息
print(f"Iteration {it+1}: max centroid shift = {math.sqrt(max_shift):.6f}")
# 若最大位移小于阈值的平方,则认为已收敛
if max_shift < epsilon ** 2:
print("收敛!")
break
# 返回最终的质心和每个样本的簇标签
return centroids, labels
# 5. 示例(可直接运行)
if __name__ == "__main__":
# 生成一个简单的二维数据集(手写,不依赖外部库)
data = [
[1.0, 2.0], [1.2, 1.8], [0.8, 2.2], # 簇 0 的样本
[5.0, 8.0], [5.2, 7.9], [4.9, 8.1], # 簇 1 的样本
[9.0, 1.0], [9.2, 1.1], [8.8, 0.9] # 簇 2 的样本
]
k = 3 # 设定要划分的簇数为 3
# 调用 kmeans,epsilon 设置得更小以获得更精细的收敛
final_centroids, final_labels = kmeans(data, k, epsilon=1e-5, max_iter=100)
print("\n最终质心:")
for idx, c in enumerate(final_centroids):
print(f" 簇 {idx}: {c}")
print("\n样本所属簇:")
for idx, label in enumerate(final_labels):
print(f" 样本 {idx} -> 簇 {label}")
|
2301_80822435/machine-learning-course
|
assignment3/2班54.py
|
Python
|
mit
| 4,575
|
import math
import random
def calc_dist(p1, p2):
return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2)))
def find_nearest(pt, centers):
dists = [calc_dist(pt, c) for c in centers]
return dists.index(min(dists))
def kmeans(data, k, eps=1e-4, max_iter=100):
centers = random.sample(data, k)
history = []
for i in range(max_iter):
labels = [find_nearest(pt, centers) for pt in data]
error = sum(calc_dist(data[idx], centers[label]) ** 2 for idx, label in enumerate(labels))
new_centers = []
for j in range(k):
group = [data[idx] for idx, label in enumerate(labels) if label == j]
if group:
new_center = [sum(dim) / len(group) for dim in zip(*group)]
else:
new_center = random.choice(data)
new_centers.append(new_center)
history.append({
'iteration': i + 1,
'centers': centers.copy(),
'error': error,
'labels': labels.copy()
})
max_move = max(calc_dist(old, new) for old, new in zip(centers, new_centers))
if max_move < eps:
print(f"第{i + 1}轮后收敛,最大移动距离: {max_move:.6f}")
break
centers = new_centers
print(f"第{i + 1}轮完成,误差: {error:.2f},最大移动: {max_move:.4f}")
return centers, labels, history
def evaluate_clustering(data, centers, labels):
compactness = []
for j in range(len(centers)):
cluster_points = [data[idx] for idx, label in enumerate(labels) if label == j]
if cluster_points:
avg_dist = sum(calc_dist(pt, centers[j]) for pt in cluster_points) / len(cluster_points)
compactness.append(avg_dist)
else:
compactness.append(0)
total_error = sum(calc_dist(data[idx], centers[label]) ** 2 for idx, label in enumerate(labels))
return {
'total_error': total_error,
'compactness': compactness,
'cluster_sizes': [labels.count(j) for j in range(len(centers))]
}
if __name__ == "__main__":
random.seed(42)
data = []
for center in [(2, 2), (6, 6), (2, 6)]:
data += [[random.gauss(center[0], 0.5), random.gauss(center[1], 0.5)] for _ in range(50)]
print(f"数据点总数: {len(data)}")
centers, labels, history = kmeans(data, 3)
print("\n最终中心点:")
for i, c in enumerate(centers):
print(f"中心{i}: ({c[0]:.2f}, {c[1]:.2f})")
evaluation = evaluate_clustering(data, centers, labels)
print(f"\n聚类评估:")
print(f"总误差: {evaluation['total_error']:.2f}")
print(f"每组点数: {evaluation['cluster_sizes']}")
print(f"各聚类平均距离: {[f'{x:.2f}' for x in evaluation['compactness']]}")
print(f"\n迭代历史 (共{len(history)}轮):")
for i, record in enumerate(history[-3:]):
print(f"第{record['iteration']}轮 - 误差: {record['error']:.2f}")
|
2301_80822435/machine-learning-course
|
assignment3/2班55.py
|
Python
|
mit
| 2,978
|
import random
import math
def assign_cluster(x, centroids):
min_distance = float('inf')
closest_centroid = 0
for i, centroid in enumerate(centroids):
distance = 0.0
for j in range(len(x)):
distance += (x[j] - centroid[j]) ** 2
distance = math.sqrt(distance)
if distance < min_distance:
min_distance = distance
closest_centroid = i
return closest_centroid
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
if not data:
return [], []
    n_samples = len(data)
    if k > n_samples:
        raise ValueError("k cannot exceed the number of samples")  # otherwise the init loop below never terminates
n_features = len(data[0])
centroids = []
used_indices = set()
while len(centroids) < k:
idx = random.randint(0, n_samples - 1)
if idx not in used_indices:
centroids.append(data[idx][:])
used_indices.add(idx)
for iteration in range(max_iterations):
labels = [assign_cluster(point, centroids) for point in data]
new_centroids = []
for i in range(k):
cluster_points = [point for j, point in enumerate(data) if labels[j] == i]
if cluster_points:
new_centroid = [0.0] * n_features
for point in cluster_points:
for dim in range(n_features):
new_centroid[dim] += point[dim]
for dim in range(n_features):
new_centroid[dim] /= len(cluster_points)
new_centroids.append(new_centroid)
else:
new_centroids.append(centroids[i][:])
max_shift = 0.0
for i in range(k):
shift = 0.0
for dim in range(n_features):
shift += (new_centroids[i][dim] - centroids[i][dim]) ** 2
max_shift = max(max_shift, math.sqrt(shift))
centroids = new_centroids
if max_shift < epsilon:
break
return centroids, labels
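# --- Added usage sketch (not part of the original file) ---
# Demo with invented data; this version returns (centroids, labels).
if __name__ == "__main__":
    demo = [[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]]
    cents, labs = Kmeans(demo, k=2)
    print("centroids:", cents)
    print("labels:", labs)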
|
2301_80822435/machine-learning-course
|
assignment3/2班56.py
|
Python
|
mit
| 1,917
|
import random
import math
from typing import List, Tuple
def assign_cluster(point: List[float], centers: List[List[float]]) -> int:
#将数据点分配到最近的聚类中心
return min(range(len(centers)),
key=lambda i: sum((p - c) ** 2 for p, c in zip(point, centers[i])))
def kmeans(data: List[List[float]], k: int, eps: float = 1e-4, max_iter: int = 100) -> Tuple[
List[List[float]], List[int]]:
#K-means聚类算法实现
if len(data) < k:
raise ValueError("数据点数量少于聚类数k")
centers = random.sample(data, k)
assignments = []
for _ in range(max_iter):
# 分配聚类
assignments = [assign_cluster(p, centers) for p in data]
# 计算新中心
new_centers = []
for i in range(k):
points = [data[j] for j, c in enumerate(assignments) if c == i]
if not points:
new_centers.append(random.choice(data))
continue
new_centers.append([sum(d) / len(points) for d in zip(*points)])
# 检查收敛
if max(math.sqrt(sum((o - n) ** 2 for o, n in zip(old, new)))
for old, new in zip(centers, new_centers)) < eps:
break
centers = new_centers
return centers, assignments
def calculate_sse(data: List[List[float]], centers: List[List[float]], assignments: List[int]) -> float:
# 计算误差平方和
return sum(sum((p - c) ** 2 for p, c in zip(data[i], centers[assignments[i]]))
for i in range(len(data)))
# 测试
if __name__ == "__main__":
random.seed(66)
data = []
# 生成3类测试数据
for mu in [(1, 1), (2, 2), (3, 3)]:
data.extend([[random.gauss(m, 0.3) for m in mu] for _ in range(30)])
centers, assignments = kmeans(data, 3)
print("聚类中心:", [list(map(round, c, [2] * 2)) for c in centers])
print("各聚类数量:", [assignments.count(i) for i in range(3)])
print("SSE:", f"{calculate_sse(data, centers, assignments):.4f}")
|
2301_80822435/machine-learning-course
|
assignment3/2班57.py
|
Python
|
mit
| 2,041
|
import random
import math
def euclidean_distance(x, y):
"""计算两点之间的欧式距离"""
return math.sqrt(sum((a - b) ** 2 for a, b in zip(x, y)))
def assign_cluster(x, centers):
"""将样本 x 分配给最近的中心,返回其索引"""
distances = [euclidean_distance(x, c) for c in centers]
return distances.index(min(distances))
def mean_point(points):
"""计算一个簇中所有点的均值作为新的中心"""
n = len(points)
if n == 0:
return None
dim = len(points[0])
return [sum(p[i] for p in points) / n for i in range(dim)]
def Kmeans(data, k, epsilon=1e-4, iteration=100):
"""手动实现 K-means 聚类"""
# 1️⃣ 随机初始化 k 个中心
centers = random.sample(data, k)
for it in range(iteration):
# 2️⃣ 为每个样本分配簇
clusters = [[] for _ in range(k)]
for x in data:
idx = assign_cluster(x, centers)
clusters[idx].append(x)
# 3️⃣ 计算新的中心
new_centers = []
for cluster in clusters:
center = mean_point(cluster)
# 防止空簇
if center is None:
center = random.choice(data)
new_centers.append(center)
# 4️⃣ 判断是否收敛
max_shift = max(euclidean_distance(c1, c2) for c1, c2 in zip(centers, new_centers))
print(f"迭代 {it+1}: 最大中心变化量 = {max_shift:.6f}")
if max_shift < epsilon:
print("算法收敛!")
break
centers = new_centers
# 5️⃣ 最终簇分配
labels = [assign_cluster(x, centers) for x in data]
return centers, labels
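# --- Added usage sketch (not part of the original file) ---
# Invented demo data; the function prints the per-iteration centre shift
# and returns (centers, labels).
if __name__ == "__main__":
    demo = [[1, 2], [2, 1], [1, 1], [9, 9], [8, 9], [9, 8]]
    centers, labels = Kmeans(demo, k=2, epsilon=1e-4, iteration=50)
    print("centers:", centers)
    print("labels:", labels)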
|
2301_80822435/machine-learning-course
|
assignment3/2班58.py
|
Python
|
mit
| 1,748
|
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
from sklearn.datasets import make_blobs
import random
class KMeans:
def __init__(self, k=3, max_iters=100, tol=1e-4, random_state=None):
self.k = k
self.max_iters = max_iters
self.tol = tol
self.random_state = random_state
self.centroids = None
self.labels = None
self.inertia_ = None
def _initialize_centroids(self, X):
"""初始化质心 - 使用K-means++方法"""
n_samples, n_features = X.shape
if self.random_state is not None:
np.random.seed(self.random_state)
# 第一个质心随机选择
centroids = [X[np.random.randint(n_samples)]]
# 选择剩余的k-1个质心
for _ in range(1, self.k):
# 计算每个样本到最近质心的距离
distances = np.array([min([np.linalg.norm(x - c) for c in centroids])
for x in X])
# 根据距离的概率分布选择下一个质心
probabilities = distances ** 2 / (distances ** 2).sum()
cumulative_probs = probabilities.cumsum()
r = np.random.rand()
for i, p in enumerate(cumulative_probs):
if r < p:
centroids.append(X[i])
break
return np.array(centroids)
def _assign_clusters(self, X, centroids):
"""将样本分配到最近的质心"""
n_samples = X.shape[0]
labels = np.zeros(n_samples, dtype=int)
for i in range(n_samples):
distances = [np.linalg.norm(X[i] - centroid) for centroid in centroids]
labels[i] = np.argmin(distances)
return labels
def _update_centroids(self, X, labels):
"""更新质心位置"""
centroids = np.zeros((self.k, X.shape[1]))
for i in range(self.k):
# 获取属于第i个簇的所有样本
cluster_points = X[labels == i]
if len(cluster_points) > 0:
centroids[i] = cluster_points.mean(axis=0)
else:
# 如果簇为空,随机重新初始化
centroids[i] = X[np.random.randint(X.shape[0])]
return centroids
def _compute_inertia(self, X, labels, centroids):
"""计算簇内平方和( inertia )"""
inertia = 0
for i in range(self.k):
cluster_points = X[labels == i]
if len(cluster_points) > 0:
inertia += np.sum((cluster_points - centroids[i]) ** 2)
return inertia
def fit(self, X):
n_samples, n_features = X.shape
# 初始化质心
self.centroids = self._initialize_centroids(X)
# 迭代优化
for iteration in range(self.max_iters):
# 分配样本到簇
labels = self._assign_clusters(X, self.centroids)
# 更新质心
new_centroids = self._update_centroids(X, labels)
# 计算质心移动距离
centroid_shift = np.linalg.norm(new_centroids - self.centroids, axis=1).max()
# 更新质心
self.centroids = new_centroids
self.labels = labels
# 计算inertia
self.inertia_ = self._compute_inertia(X, labels, self.centroids)
# 检查收敛
if centroid_shift < self.tol:
print(f"收敛于第 {iteration + 1} 次迭代")
break
return self
def predict(self, X):
"""预测新样本的簇标签"""
return self._assign_clusters(X, self.centroids)
def fit_predict(self, X):
"""训练模型并返回簇标签"""
self.fit(X)
return self.labels
# 测试和可视化
def test_kmeans():
# 生成测试数据
X, y_true = make_blobs(n_samples=300, centers=3, n_features=2,
random_state=42, cluster_std=0.60)
# 创建KMeans实例
kmeans = KMeans(k=3, max_iters=100, random_state=42)
# 训练模型
labels = kmeans.fit_predict(X)
# 可视化结果
plt.figure(figsize=(12, 5))
# 原始数据
plt.subplot(1, 2, 1)
plt.scatter(X[:, 0], X[:, 1], c=y_true, cmap='viridis', s=50, alpha=0.8)
plt.title('原始数据 (真实标签)')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
# 聚类结果
plt.subplot(1, 2, 2)
plt.scatter(X[:, 0], X[:, 1], c=labels, cmap='viridis', s=50, alpha=0.8)
plt.scatter(kmeans.centroids[:, 0], kmeans.centroids[:, 1],
c='red', marker='X', s=200, label='质心')
plt.title('KMeans 聚类结果')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
plt.legend()
plt.tight_layout()
plt.show()
print(f"簇内平方和 (Inertia): {kmeans.inertia_:.2f}")
print("质心位置:")
for i, centroid in enumerate(kmeans.centroids):
print(f"簇 {i}: {centroid}")
# 肘部法则确定最佳K值
def elbow_method(X, max_k=10):
"""使用肘部法则确定最佳聚类数量"""
inertias = []
k_range = range(1, max_k + 1)
for k in k_range:
kmeans = KMeans(k=k, random_state=42)
kmeans.fit(X)
inertias.append(kmeans.inertia_)
# 绘制肘部图
plt.figure(figsize=(8, 6))
plt.plot(k_range, inertias, 'bo-')
plt.xlabel('聚类数量 K')
plt.ylabel('簇内平方和 (Inertia)')
plt.title('肘部法则 - 确定最佳K值')
plt.grid(True)
plt.show()
return inertias
if __name__ == "__main__":
# 生成测试数据
X, _ = make_blobs(n_samples=300, centers=3, n_features=2,
random_state=42, cluster_std=0.60)
# 测试KMeans
test_kmeans()
# 使用肘部法则
print("\n使用肘部法则确定最佳K值:")
inertias = elbow_method(X)
|
2301_80822435/machine-learning-course
|
assignment3/2班59.py
|
Python
|
mit
| 5,934
|
import math
import random
def assign_cluster(x, c):
"""
将样本x分配到最近的聚类中心
参数:
x: 单个样本(列表或元组)
c: 聚类中心列表(每个元素为列表或元组)
返回:
最近聚类中心的索引
"""
min_dist = float('inf')
cluster_idx = 0
for i, center in enumerate(c):
# 计算欧氏距离
dist = math.sqrt(sum((xi - ci)**2 for xi, ci in zip(x, center)))
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon, iteration):
"""
K均值聚类算法
参数:
data: 数据集(列表,每个元素为样本)
k: 聚类数量
epsilon: 中心点变化阈值,小于此值停止迭代
iteration: 最大迭代次数
返回:
centers: 最终聚类中心
labels: 每个样本的聚类标签
"""
# 检查输入合法性
if k <= 0 or k > len(data):
raise ValueError("k值必须为正且不大于样本数量")
if epsilon <= 0:
raise ValueError("epsilon必须为正数")
if iteration <= 0:
raise ValueError("迭代次数必须为正")
# 初始化聚类中心(随机选择k个不同样本)
centers = random.sample(data, k)
n_features = len(data[0]) # 特征维度
for _ in range(iteration):
# 分配聚类
clusters = [[] for _ in range(k)] # 每个聚类包含的样本
for x in data:
idx = assign_cluster(x, centers)
clusters[idx].append(x)
# 计算新的聚类中心
new_centers = []
for cluster in clusters:
if not cluster: # 避免空聚类(可根据实际需求调整处理方式)
new_centers.append(random.choice(data)) # 随机选一个样本作为中心
continue
# 计算每个特征的均值
center = [sum(dim) / len(cluster) for dim in zip(*cluster)]
new_centers.append(center)
# 计算中心变化量
total_move = 0.0
for old, new in zip(centers, new_centers):
total_move += math.sqrt(sum((o - n)**2 for o, n in zip(old, new)))
# 检查是否收敛
if total_move < epsilon:
centers = new_centers
break
centers = new_centers
# 生成最终标签
labels = [assign_cluster(x, centers) for x in data]
return centers, labels
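# --- Added usage sketch (not part of the original file) ---
# Invented demo data; note that epsilon and iteration are required
# positional parameters in this version.
if __name__ == "__main__":
    demo = [[0, 0], [1, 0], [0, 1], [10, 10], [11, 10], [10, 11]]
    centers, labels = Kmeans(demo, k=2, epsilon=1e-4, iteration=100)
    print("centers:", centers)
    print("labels:", labels)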
|
2301_80822435/machine-learning-course
|
assignment3/2班61.py
|
Python
|
mit
| 2,529
|
import random
import math
def assign_cluster(x, c):
min_dist = float('inf') # 初始最小距离设为无穷大
cluster_idx = 0 # 初始聚类索引设为0
for i, centroid in enumerate(c):
# 计算x与当前质心的欧氏距离(多维向量距离公式)
dist = 0.0
for xi, ci in zip(x, centroid):
dist += (xi - ci) ** 2
dist = math.sqrt(dist)
# 更新最小距离和对应的聚类索引
if dist < min_dist:
min_dist = dist
cluster_idx = i
return cluster_idx
def Kmeans(data, k, epsilon=1e-3, iteration=100):
if not isinstance(k, int) or k <= 0:
raise ValueError("k必须是正整数")
if len(data) <= k:
raise ValueError("样本数量必须大于k")
# 校验所有样本维度一致
dim = len(data[0])
for x in data:
if len(x) != dim:
raise ValueError("所有样本必须具有相同的维度")
# 用索引随机选择,避免重复选择同一个样本
random.seed(42) # 固定随机种子,保证结果可复现(可删除)
centroid_indices = random.sample(range(len(data)), k)
centroids = [data[idx].copy() for idx in centroid_indices]
for iter_cnt in range(iteration):
# 保存当前质心(用于后续计算变化量)
old_centroids = [c.copy() for c in centroids]
cluster_assignments = []
for x in data:
cluster_idx = assign_cluster(x, centroids)
cluster_assignments.append(cluster_idx)
# 初始化:每个聚类的样本总和、样本数量
cluster_sums = [[0.0 for _ in range(dim)] for _ in range(k)] # 按维度求和
cluster_counts = [0 for _ in range(k)] # 样本数量
# 累加每个聚类的样本特征
for idx, x in enumerate(data):
cluster_idx = cluster_assignments[idx]
for d in range(dim):
cluster_sums[cluster_idx][d] += x[d]
cluster_counts[cluster_idx] += 1
# 计算新质心(均值):避免除零(理论上不会发生,因k<=样本数且分配均匀)
for i in range(k):
if cluster_counts[i] == 0:
# 极端情况:某个聚类无样本,重新随机选择一个样本作为质心
centroids[i] = random.choice(data).copy()
else:
# 按维度计算均值
for d in range(dim):
centroids[i][d] = cluster_sums[i][d] / cluster_counts[i]
# 计算所有质心的最大变化量(多维向量的欧氏距离)
max_centroid_change = 0.0
for old_c, new_c in zip(old_centroids, centroids):
change = 0.0
for oc, nc in zip(old_c, new_c):
change += (oc - nc) ** 2
change = math.sqrt(change)
if change > max_centroid_change:
max_centroid_change = change
# 若质心变化量小于epsilon,收敛并退出迭代
if max_centroid_change < epsilon:
print(f"迭代{iter_cnt+1}次后收敛(质心最大变化量:{max_centroid_change:.6f} < {epsilon})")
break
return cluster_assignments, centroids
if __name__ == "__main__":
data = [
[1, 2], [2, 1], [1, 1], [2, 2],
[5, 6], [6, 5], [5, 5], [6, 6],
[9, 8], [8, 9], [9, 9], [8, 8]
]
cluster_results, final_centroids = Kmeans(data, k=3)
print("\n最终聚类结果(样本索引->聚类索引):")
for idx, cluster_idx in enumerate(cluster_results):
print(f"样本{data[idx]} -> 聚类{cluster_idx}")
print("\n最终质心:")
for i, centroid in enumerate(final_centroids):
print(f"聚类{i}质心:{[round(c, 3) for c in centroid]}")
|
2301_80822435/machine-learning-course
|
assignment3/2班63.py
|
Python
|
mit
| 3,827
|
import random
import math
def assign_cluster(x, centroids):
min_dist = float('inf')
cluster_idx = 0
for idx, centroid in enumerate(centroids):
# 计算数据点x与质心centroid的欧氏距离
dist = 0.0
for xi, ci in zip(x, centroid):
dist += (xi - ci) ** 2
dist = math.sqrt(dist) # 欧氏距离
if dist < min_dist:
min_dist = dist
cluster_idx = idx
return cluster_idx
def Kmeans(data, k, epsilon=1e-3, iteration=100):
# 输入合法性校验
if not data or len(data) <= k:
raise ValueError("数据集长度必须大于聚类数量k")
if k <= 1:
raise ValueError("聚类数量k必须大于1")
dim = len(data[0])
for x in data:
if len(x) != dim:
raise ValueError("所有数据点必须具有相同的维度")
min_vals = [min(x[d] for x in data) for d in range(dim)]
max_vals = [max(x[d] for x in data) for d in range(dim)]
centroids = []
for _ in range(k):
centroid = [random.uniform(min_vals[d], max_vals[d]) for d in range(dim)]
centroids.append(centroid)
    iter_count = 0
    max_centroid_change = 0.0  # pre-defined so the final report below cannot raise NameError when iteration == 0
while iter_count < iteration:
old_centroids = [c.copy() for c in centroids]
clusters = []
for x in data:
cluster_idx = assign_cluster(x, centroids)
clusters.append(cluster_idx)
sum_clusters = [[0.0 for _ in range(dim)] for _ in range(k)] # 各聚类各维度总和
count_clusters = [0 for _ in range(k)] # 各聚类数据点数量
for x, idx in zip(data, clusters):
for d in range(dim):
sum_clusters[idx][d] += x[d]
count_clusters[idx] += 1
for i in range(k):
if count_clusters[i] > 0:
centroids[i] = [sum_clusters[i][d] / count_clusters[i] for d in range(dim)]
max_centroid_change = 0.0
for old_c, new_c in zip(old_centroids, centroids):
change = 0.0
for oc, nc in zip(old_c, new_c):
change += (oc - nc) ** 2
change = math.sqrt(change)
if change > max_centroid_change:
max_centroid_change = change
if max_centroid_change < epsilon:
print(f"迭代{iter_count+1}次后收敛(质心最大变化量:{max_centroid_change:.6f} < {epsilon})")
break
iter_count += 1
if iter_count >= iteration:
print(f"已达到最大迭代次数{iteration},未完全收敛(质心最大变化量:{max_centroid_change:.6f})")
return clusters, centroids
if __name__ == "__main__":
print("="*50)
print("测试1:一维数据聚类(模拟灰度值)")
gray_data = [[12], [15], [18], [20], [85], [90], [92], [95], [98], [100]]
clusters1, centroids1 = Kmeans(data=gray_data, k=2, epsilon=1e-4, iteration=50)
print(f"原始数据:{[x[0] for x in gray_data]}")
print(f"聚类结果(0/1表示聚类索引):{clusters1}")
print(f"最终质心:{[round(c[0], 2) for c in centroids1]}")
print("\n" + "="*50)
print("测试2:二维数据聚类")
two_d_data = [
[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0],
[1.0, 0.6], [9.0, 11.0], [8.0, 2.0], [10.0, 2.0],
[9.0, 3.0], [0.5, 1.0], [7.0, 9.0], [6.0, 8.5]
]
clusters2, centroids2 = Kmeans(data=two_d_data, k=3, epsilon=1e-4, iteration=50)
print(f"二维数据聚类结果:{clusters2}")
print(f"最终质心:{[(round(c[0],2), round(c[1],2)) for c in centroids2]}")
|
2301_80822435/machine-learning-course
|
assignment3/2班64.py
|
Python
|
mit
| 3,685
|
import random
import math
def assign_cluster(x, c):
"""
将样本x分配到最近的簇中心
参数:
x: 单个样本向量 (list 或 tuple)
c: 簇中心列表,每个元素是一个簇中心向量 (list of lists)
返回:
int: 最近簇的索引
"""
min_distance = float('inf')
cluster_index = 0
for i, center in enumerate(c):
# 计算欧几里得距离
distance = 0.0
for xi, ci in zip(x, center):
distance += (xi - ci) ** 2
distance = math.sqrt(distance)
# 更新最近簇
if distance < min_distance:
min_distance = distance
cluster_index = i
return cluster_index
def Kmeans(data, k, epsilon=1e-6, iteration=100):
"""
K 均值聚类算法实现
参数:
data: 样本集,每个元素是一个样本向量 (list of lists)
k: 簇的数量 (int)
epsilon: 簇中心变化的阈值,用于判断收敛 (float, 可选)
iteration: 最大迭代次数 (int, 可选)
返回:
tuple: (簇分配结果, 最终簇中心)
- 簇分配结果: list,每个元素是对应样本的簇索引
- 最终簇中心: list of lists,每个元素是一个簇中心向量
"""
# 检查输入有效性
if not data or k <= 0 or k > len(data):
raise ValueError("无效的输入参数")
# 1. 初始化簇中心:随机选择 k 个样本作为初始中心
num_samples = len(data)
num_features = len(data[0])
cluster_centers = random.sample(data, k)
for _ in range(iteration):
# 2. 分配簇
cluster_assignments = [assign_cluster(x, cluster_centers) for x in data]
# 3. 更新簇中心
new_cluster_centers = []
for i in range(k):
# 收集当前簇的所有样本
cluster_samples = [data[j] for j in range(num_samples) if cluster_assignments[j] == i]
# 计算新的簇中心(均值)
if not cluster_samples: # 避免空簇
new_center = random.choice(data) # 重新随机选择一个中心
else:
new_center = []
for f in range(num_features):
feature_mean = sum(sample[f] for sample in cluster_samples) / len(cluster_samples)
new_center.append(feature_mean)
new_cluster_centers.append(new_center)
# 4. 检查收敛:计算簇中心的总变化量
total_change = 0.0
for old_center, new_center in zip(cluster_centers, new_cluster_centers):
for old, new in zip(old_center, new_center):
total_change += (old - new) ** 2
# 如果变化量小于阈值,收敛并退出
if math.sqrt(total_change) < epsilon:
break
cluster_centers = new_cluster_centers
return cluster_assignments, cluster_centers
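# --- Added usage sketch (not part of the original file) ---
# Invented demo data; note the return order here is (assignments, centers),
# the reverse of several other files in this assignment.
if __name__ == "__main__":
    demo = [[1.0, 1.0], [1.1, 0.9], [6.0, 6.0], [6.1, 5.9]]
    assignments, centers = Kmeans(demo, k=2)
    print("assignments:", assignments)
    print("centers:", centers)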
|
2301_80822435/machine-learning-course
|
assignment3/2班65.py
|
Python
|
mit
| 3,038
|
import math
import random
def kmeans_plus_plus_init(data, k):
centroids = []
# 1. 随机选择第一个聚类中心
centroids.append(random.choice(data))
# 2. 选择剩余的 k-1 个聚类中心
for _ in range(1, k):
# 计算每个数据点到最近聚类中心的距离
distances = []
for point in data:
min_distance = float('inf')
for centroid in centroids:
distance = 0
for j in range(len(point)):
distance += (point[j] - centroid[j]) ** 2
min_distance = min(min_distance, distance)
distances.append(min_distance)
# 将距离转换为概率(距离越大,被选中的概率越大)
total_distance = sum(distances)
probabilities = [dist / total_distance for dist in distances]
# 根据概率分布选择下一个聚类中心
next_centroid_idx = random.choices(range(len(data)), weights=probabilities)[0]
centroids.append(data[next_centroid_idx])
return centroids
def assign_cluster(x, centroids):
min_distance = float('inf')
closest_centroid = 0
for i, centroid in enumerate(centroids):
# 计算欧几里得距离
distance = 0
for j in range(len(x)):
distance += (x[j] - centroid[j]) ** 2
distance = math.sqrt(distance)
if distance < min_distance:
min_distance = distance
closest_centroid = i
return closest_centroid
def Kmeans(data, k, epsilon=1e-4, max_iterations=100, init_method='random'):
# 1. 初始化聚类中心
if init_method == 'kmeans++':
centroids = kmeans_plus_plus_init(data, k)
print("使用K-means++初始化")
else:
centroids = random.sample(data, k)
print("使用随机初始化")
print("初始聚类中心:")
for i, centroid in enumerate(centroids):
print(f" 聚类 {i}: {[round(c, 3) for c in centroid]}")
for iteration in range(max_iterations):
# 2. 分配数据点到聚类
clusters = [[] for _ in range(k)]
assignments = []
for point in data:
cluster_idx = assign_cluster(point, centroids)
clusters[cluster_idx].append(point)
assignments.append(cluster_idx)
# 3. 更新聚类中心
new_centroids = []
for cluster_points in clusters:
if not cluster_points:
# 如果聚类为空,随机选择一个数据点作为中心
new_centroids.append(random.choice(data))
else:
# 计算每个维度的平均值
new_centroid = []
for dim in range(len(data[0])):
dim_avg = sum(point[dim] for point in cluster_points) / len(cluster_points)
new_centroid.append(dim_avg)
new_centroids.append(new_centroid)
# 4. 检查收敛
converged = True
for i in range(k):
# 计算新旧聚类中心的距离
distance = 0
for dim in range(len(centroids[i])):
distance += (centroids[i][dim] - new_centroids[i][dim]) ** 2
distance = math.sqrt(distance)
if distance > epsilon:
converged = False
break
centroids = new_centroids
if converged:
print(f"在第 {iteration + 1} 次迭代后收敛")
break
else:
print(f"达到最大迭代次数 {max_iterations}")
return centroids, clusters, assignments
if __name__ == "__main__":
# 创建简单的测试数据
random.seed(42)
# 生成三组聚类数据
test_data = []
# 第一组
for _ in range(20):
test_data.append([random.gauss(2, 0.3), random.gauss(2, 0.3)])
# 第二组
for _ in range(20):
test_data.append([random.gauss(8, 0.3), random.gauss(8, 0.3)])
# 第三组
for _ in range(20):
test_data.append([random.gauss(5, 0.3), random.gauss(2, 0.3)])
print("数据集大小:", len(test_data))
# 比较两种初始化方法
print("\n" + "=" * 50)
print("使用随机初始化:")
print("=" * 50)
centroids_random, clusters_random, assignments_random = Kmeans(
test_data, k=3, epsilon=0.001, init_method='random'
)
print("\n" + "=" * 50)
print("使用K-means++初始化:")
print("=" * 50)
centroids_plus, clusters_plus, assignments_plus = Kmeans(
test_data, k=3, epsilon=0.001, init_method='kmeans++'
)
# 输出结果比较
print("\n" + "=" * 50)
print("结果比较:")
print("=" * 50)
print("\n随机初始化结果:")
for i, centroid in enumerate(centroids_random):
print(f"聚类 {i}: {[round(c, 3) for c in centroid]}, 点数: {len(clusters_random[i])}")
print("\nK-means++初始化结果:")
for i, centroid in enumerate(centroids_plus):
print(f"聚类 {i}: {[round(c, 3) for c in centroid]}, 点数: {len(clusters_plus[i])}")
# 计算聚类质量(簇内距离和)
def calculate_wcss(centroids, clusters):
"""计算簇内平方和(Within-Cluster Sum of Squares)"""
wcss = 0
for i, cluster_points in enumerate(clusters):
centroid = centroids[i]
for point in cluster_points:
distance = 0
for dim in range(len(point)):
distance += (point[dim] - centroid[dim]) ** 2
wcss += distance
return wcss
wcss_random = calculate_wcss(centroids_random, clusters_random)
wcss_plus = calculate_wcss(centroids_plus, clusters_plus)
print(f"\n聚类质量比较:")
print(f"随机初始化 WCSS: {wcss_random:.4f}")
print(f"K-means++初始化 WCSS: {wcss_plus:.4f}")
print(f"改进: {(wcss_random - wcss_plus) / wcss_random * 100:.2f}%")
|
2301_80822435/machine-learning-course
|
assignment3/2班66.py
|
Python
|
mit
| 6,111
|
import random
import math
def assign_cluster(x, c):
min_distance = float("inf") #初始化最小距离
cluster_index = 0
for i, center in enumerate(c):
#计算欧氏距离
distance = math.sqrt(sum((xi - ci) ** 2 for xi, ci in zip(x, center)))
if distance < min_distance:
min_distance = distance
cluster_index = i
return cluster_index
def Kmeans(data, k, epsilon=1e-4, max_iterations=100):
#随机选择中心
centers = random.sample(data, k)
#初始化聚类结果
for _ in range(max_iterations):
clusters = [[] for _ in range(k)]
labels = []
for point in data:
cluster_idx = assign_cluster(point, centers)
clusters[cluster_idx].append(point)
labels.append(cluster_idx)
#更新中心点
new_centers = []
for cluster in clusters:
if not cluster:
new_centers.append(random.choice(data))
else:
new_center = [sum(dim) / len(cluster) for dim in zip(*cluster)]
new_centers.append(new_center)
#检查中心点变化是否小于阈值
center_shift = sum(
math.sqrt(sum((ci - nci) ** 2 for ci, nci in zip(center, new_center)))
for center, new_center in zip(centers, new_centers)
)
if center_shift < epsilon:
break
centers = new_centers
return centers, labels
if __name__ == "__main__":
# 生成3类随机数据点
centers = [[1, 1], [4, 4], [7, 7]]
data = []
for center in centers:
for _ in range(100):
data.append([random.gauss(center[0], 0.5), random.gauss(center[1], 0.5)])
# 运行K-Means算法
k = 3
final_centers, labels = Kmeans(data, k)
# 输出结果
print("最终聚类中心:")
for i, center in enumerate(final_centers):
print(f"簇{i}: {center}")
# 可视化
import matplotlib.pyplot as plt
colors = ['r', 'g', 'b']
for i, point in enumerate(data):
plt.scatter(point[0], point[1], c=colors[labels[i]])
for center in final_centers:
plt.scatter(center[0], center[1], c='black', marker='x', s=100)
plt.title("K-Means")
plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/2班67.py
|
Python
|
mit
| 2,266
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False
def assign_cluster(x, c):
distances = np.linalg.norm(c - x, axis=1)
y = np.argmin(distances)
return y
def Kmeans(data, k, epsilon=1e-3, iteration=100):
n_samples, n_features = data.shape
np.random.seed(42)
centroids = data[np.random.choice(n_samples, k, replace=False)]
Y = np.zeros(n_samples, dtype=int)
for _ in range(iteration):
for i in range(n_samples):
Y[i] = assign_cluster(data[i], centroids)
        # 空簇时保留原中心,避免对空切片取均值得到 NaN
        new_centroids = np.array([
            data[Y == idx].mean(axis=0) if np.any(Y == idx) else centroids[idx]
            for idx in range(k)
        ])
if np.linalg.norm(new_centroids - centroids) < epsilon:
break
centroids = new_centroids
return Y, centroids
if __name__ == "__main__":
np.random.seed(42)
cluster1 = np.random.randn(30, 2) + [5, 5]
cluster2 = np.random.randn(30, 2) + [0, 0]
cluster3 = np.random.randn(40, 2) + [10, 0]
test_data = np.vstack([cluster1, cluster2, cluster3])
k = 3
labels, centers = Kmeans(test_data, k=k)
print(f"聚类完成!共{len(test_data)}个样本,聚为{k}类")
print("前10个样本的簇标签:", labels[:10])
print("最终聚类中心:\n", centers)
plt.figure(figsize=(8, 6))
for i in range(k):
plt.scatter(test_data[labels == i, 0], test_data[labels == i, 1], label=f"簇{i+1}")
plt.scatter(centers[:, 0], centers[:, 1], c="black", marker="*", s=200, label="聚类中心")
plt.legend()
plt.title("K-means聚类结果")
plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/2班68.py
|
Python
|
mit
| 1,650
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
def assign_cluster(x, c):
"""
将样本 x 分配到最近的聚类中心
"""
distances = np.linalg.norm(x[:, np.newaxis] - c, axis=2) # shape: (n_samples, K)
y = np.argmin(distances, axis=1)
return y
def Kmean(data, K, epsilon=1e-4, max_iteration=100):
"""
K-Means 聚类算法
"""
n_samples, n_features = data.shape
np.random.seed(42)
centers = data[np.random.choice(n_samples, K, replace=False)]
for i in range(max_iteration):
# Step 1: 分配簇
labels = assign_cluster(data, centers)
# Step 2: 更新中心
new_centers = np.array([
data[labels == k].mean(axis=0) if np.any(labels == k) else centers[k]
for k in range(K)
])
# Step 3: 判断收敛
shift = np.linalg.norm(new_centers - centers)
if shift < epsilon:
print(f"第 {i+1} 次迭代后收敛,中心移动量 {shift:.6f}")
break
centers = new_centers
return centers, labels
if __name__ == "__main__":
# 生成二维数据
data, _ = make_blobs(n_samples=200, centers=3, n_features=2, random_state=42)
centers, labels = Kmean(data, K=3)
print("最终聚类中心:\n", centers)
# ======================
# 可视化
# ======================
plt.figure(figsize=(8, 6))
# 绘制每个簇的样本点
for k in range(3):
plt.scatter(data[labels == k, 0], data[labels == k, 1], label=f'Cluster {k}', alpha=0.6)
# 绘制中心点
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, marker='X', label='Centers')
plt.title("K-Means Clustering Result")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.legend()
plt.grid(True)
plt.show()
|
2301_80822435/machine-learning-course
|
assignment3/2班70.py
|
Python
|
mit
| 1,862
|
import math
from collections import Counter
from operator import itemgetter
class KNN:
def __init__(self, k=3, task='classification'):
"""
初始化 KNN 模型
参数:
k: 近邻数量
task: 任务类型,'classification' 或 'regression'
"""
self.k = k
self.task = task
self.X_train = None
self.y_train = None
def fit(self, X, y):
"""
训练模型,存储训练数据
参数:
X: 训练特征,形状为 (n_samples, n_features)
y: 训练标签,形状为 (n_samples,)
"""
self.X_train = X
self.y_train = y
def _euclidean_distance(self, a, b):
"""
计算两个向量之间的欧氏距离
"""
return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
def _get_k_neighbors(self, x):
"""
获取距离目标样本最近的 k 个邻居
返回:
neighbors: 最近的 k 个邻居的索引和距离
"""
distances = []
# 计算与所有训练样本的距离
for i, train_sample in enumerate(self.X_train):
dist = self._euclidean_distance(x, train_sample)
distances.append((i, dist))
# 按距离排序并取前 k 个
distances.sort(key=itemgetter(1))
neighbors = distances[:self.k]
return neighbors
def predict(self, X):
"""
预测新样本的标签
参数:
X: 测试特征,形状为 (n_samples, n_features)
返回:
predictions: 预测结果
"""
predictions = []
for sample in X:
# 获取 k 个最近邻居
neighbors = self._get_k_neighbors(sample)
if self.task == 'classification':
# 分类任务:多数投票
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
most_common = Counter(neighbor_labels).most_common(1)
predictions.append(most_common[0][0])
elif self.task == 'regression':
# 回归任务:平均值
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
predictions.append(sum(neighbor_labels) / len(neighbor_labels))
return predictions
def predict_proba(self, X):
"""
分类任务中返回每个类别的概率(仅适用于分类任务)
参数:
X: 测试特征,形状为 (n_samples, n_features)
返回:
probabilities: 每个样本属于每个类别的概率
"""
if self.task != 'classification':
raise ValueError("predict_proba 仅适用于分类任务")
probabilities = []
for sample in X:
# 获取 k 个最近邻居
neighbors = self._get_k_neighbors(sample)
# 统计每个类别的数量
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
label_counts = Counter(neighbor_labels)
# 计算概率
prob_dict = {}
total = len(neighbors)
for label in set(self.y_train):
prob_dict[label] = label_counts.get(label, 0) / total
probabilities.append(prob_dict)
return probabilities
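# --- Added usage sketch (not part of the original file) ---
# Tiny invented dataset exercising both task modes of the KNN class above.
if __name__ == "__main__":
    X = [[0, 0], [0, 1], [1, 0], [5, 5], [5, 6], [6, 5]]
    y_cls = ['A', 'A', 'A', 'B', 'B', 'B']
    clf = KNN(k=3, task='classification')
    clf.fit(X, y_cls)
    print(clf.predict([[0.5, 0.5], [5.5, 5.5]]))  # expected: ['A', 'B']
    y_reg = [1.0, 1.2, 0.8, 10.0, 10.5, 9.5]
    reg = KNN(k=3, task='regression')
    reg.fit(X, y_reg)
    print(reg.predict([[0.2, 0.2]]))  # mean of the 3 nearest targets -> [1.0]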
|
2301_80822435/machine-learning-course
|
assignment4/1班01.py
|
Python
|
mit
| 3,396
|
import math
import matplotlib.pyplot as plt
# ===================== 距离函数 =====================
def euclidean_distance(x1, x2):
return math.sqrt(sum((a - b) ** 2 for a, b in zip(x1, x2)))
# ===================== KNN 预测函数 =====================
def knn_predict(train_X, train_y, x, k=3):
distances = []
for xi, yi in zip(train_X, train_y):
d = euclidean_distance(x, xi)
distances.append((d, yi, xi))
# 按距离排序
distances.sort(key=lambda t: t[0])
k_nearest = distances[:k]
# 多数投票
labels = [label for _, label, _ in k_nearest]
pred = max(set(labels), key=labels.count)
return pred, k_nearest
# ===================== 示例数据 =====================
train_X = [
[1.0, 2.0],
[1.5, 1.8],
[5.0, 8.0],
[6.0, 9.0]
]
train_y = [0, 0, 1, 1]
test_point = [2.0, 2.0]
k = 3
# 预测
pred_label, nearest = knn_predict(train_X, train_y, test_point, k)
print("预测类别为:", pred_label)
# ===================== 可视化 =====================
plt.figure(figsize=(7, 7))
# 颜色选择
colors = {0: "blue", 1: "red"}
# 画训练点
for (x, y), label in zip(train_X, train_y):
plt.scatter(x, y, color=colors[label], s=80)
plt.text(x+0.05, y+0.05, f"{label}", fontsize=12)
# 画测试点
plt.scatter(test_point[0], test_point[1], color="green", marker="^", s=200)
plt.text(test_point[0]+0.05, test_point[1]+0.05, "test", fontsize=12)
# 画 k 个最近邻连线
for dist, label, point in nearest:
plt.plot([test_point[0], point[0]], [test_point[1], point[1]], linestyle="--")
plt.title(f"KNN (k={k}) 预测结果: {pred_label}")
plt.xlabel("X1")
plt.ylabel("X2")
plt.grid(True)
plt.show()
|
2301_80822435/machine-learning-course
|
assignment4/1班02.py
|
Python
|
mit
| 1,706
|
import math
from collections import Counter
from operator import itemgetter
class KNN:
"""
K近邻算法实现(仅使用Python标准库)
适用于分类问题
"""
def __init__(self, k=5, weights='uniform', metric='euclidean'):
"""
初始化KNN分类器
参数:
k: 邻居数量,默认5
weights: 权重类型,'uniform'(平等)或'distance'(按距离加权)
metric: 距离度量,'euclidean'(欧氏距离)或'manhattan'(曼哈顿距离)
"""
self.k = k
self.weights = weights
self.metric = metric
self.X_train = None
self.y_train = None
def fit(self, X, y):
"""
训练模型(KNN只需存储数据)
参数:
X: 训练特征,列表的列表或元组的列表
y: 训练标签,列表
"""
self.X_train = [tuple(sample) for sample in X] # 转换为元组确保不可变
self.y_train = list(y)
return self
def _calculate_distance(self, x1, x2):
"""
        计算两个样本点之间的距离
"""
if len(x1) != len(x2):
raise ValueError("样本特征维度不一致")
if self.metric == 'euclidean':
            # 欧氏距离: sqrt(Σ(x_i - y_i)^2)
squared_diff = sum((a - b) ** 2 for a, b in zip(x1, x2))
return math.sqrt(squared_diff)
elif self.metric == 'manhattan':
            # 曼哈顿距离: Σ|x_i - y_i|
return sum(abs(a - b) for a, b in zip(x1, x2))
else:
raise ValueError("不支持的距离度量方法")
def _normalize_minmax(self, X):
"""
        最小-最大归一化
公式: (x - min) / (max - min)
"""
if not X:
return X
# 转置矩阵以便按特征列计算
transposed = list(zip(*X))
normalized_transposed = []
for feature_col in transposed:
min_val = min(feature_col)
max_val = max(feature_col)
range_val = max_val - min_val
if range_val == 0: # 避免除零
normalized_col = [0.0] * len(feature_col)
else:
normalized_col = [(x - min_val) / range_val for x in feature_col]
normalized_transposed.append(normalized_col)
# 转置回来
return list(zip(*normalized_transposed))
def predict_single(self, x):
"""
        预测单个样本的类别
"""
if self.X_train is None or self.y_train is None:
raise ValueError("请先使用fit方法训练模型")
# 计算与所有训练样本的距离
distances = []
for i, train_sample in enumerate(self.X_train):
dist = self._calculate_distance(x, train_sample)
distances.append((dist, i))
        # 按距离排序并选择前k个
distances.sort(key=itemgetter(0))
k_nearest = distances[:self.k]
        # 统计k个邻居的类别
if self.weights == 'uniform':
# 平等权重
class_votes = {}
for dist, idx in k_nearest:
label = self.y_train[idx]
class_votes[label] = class_votes.get(label, 0) + 1
            # 返回出现次数最多的类别
return max(class_votes.items(), key=itemgetter(1))[0]
elif self.weights == 'distance':
# 距离加权(距离越小权重越大)
class_weights = {}
for dist, idx in k_nearest:
label = self.y_train[idx]
# 避免除零,给距离加一个小值
weight = 1 / (dist + 1e-8)
class_weights[label] = class_weights.get(label, 0) + weight
return max(class_weights.items(), key=itemgetter(1))[0]
def predict(self, X):
"""
预测多个样本的类别
"""
return [self.predict_single(tuple(sample)) for sample in X]
def score(self, X_test, y_test):
"""
计算模型在测试集上的准确率
"""
predictions = self.predict(X_test)
correct = sum(1 for pred, true in zip(predictions, y_test) if pred == true)
return correct / len(y_test)
# 示例使用
if __name__ == "__main__":
    # 创建示例数据集
X_train = [
[1.0, 1.1],
[1.0, 1.0],
[0.0, 0.0],
[0.0, 0.1]
]
y_train = ['A', 'A', 'B', 'B']
# 创建KNN分类器
knn = KNN(k=3, weights='uniform', metric='euclidean')
# 训练模型
knn.fit(X_train, y_train)
# 预测新样本
test_sample = [0.2, 0.2]
prediction = knn.predict_single(test_sample)
print(f"样本 {test_sample} 的预测类别: {prediction}")
# 测试多个样本
test_samples = [[0.5, 0.5], [1.2, 1.0]]
predictions = knn.predict(test_samples)
print(f"批量预测结果: {predictions}")
# 测试准确率
X_test = [[0.1, 0.1], [1.1, 1.0]]
y_test = ['B', 'A']
accuracy = knn.score(X_test, y_test)
print(f"模型准确率: {accuracy:.2%}")
|
2301_80822435/machine-learning-course
|
assignment4/1班03.py
|
Python
|
mit
| 5,563
|
import numpy as np
from collections import Counter
class KNN:
def __init__(self, k=3):
self.k = k
self.X_train = None
self.y_train = None
def fit(self, X, y):
self.X_train = np.array(X)
self.y_train = np.array(y)
def euclidean_distance(self, x1, x2):
return np.sqrt(np.sum((x1 - x2) ** 2))
def manhattan_distance(self, x1, x2):
return np.sum(np.abs(x1 - x2))
def _get_neighbors(self, x):
distances = []
for i, train_sample in enumerate(self.X_train):
dist = self.euclidean_distance(x, train_sample)
distances.append((i, dist))
distances.sort(key=lambda x: x[1])
return distances[:self.k]
def predict(self, X):
X = np.array(X)
predictions = []
for sample in X:
neighbors = self._get_neighbors(sample)
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
most_common = Counter(neighbor_labels).most_common(1)
predictions.append(most_common[0][0])
return np.array(predictions)
def predict_proba(self, X):
X = np.array(X)
probabilities = []
unique_classes = np.unique(self.y_train)
for sample in X:
neighbors = self._get_neighbors(sample)
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
label_counts = Counter(neighbor_labels)
proba = [label_counts.get(cls, 0) / self.k for cls in unique_classes]
probabilities.append(proba)
return np.array(probabilities)
def score(self, X, y):
predictions = self.predict(X)
return np.mean(predictions == y)
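# --- Added usage sketch (not part of the original file) ---
# Invented data; predict/score accept array-likes.
if __name__ == "__main__":
    X_train = [[0, 0], [0, 1], [1, 1], [8, 8], [8, 9], [9, 9]]
    y_train = [0, 0, 0, 1, 1, 1]
    model = KNN(k=3)
    model.fit(X_train, y_train)
    print(model.predict([[0.5, 0.5], [8.5, 8.5]]))  # expected: [0 1]
    print(model.predict_proba([[0.5, 0.5]]))        # per-class vote shares
    print(model.score([[1, 0], [9, 8]], [0, 1]))    # expected: 1.0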
|
2301_80822435/machine-learning-course
|
assignment4/1班04.py
|
Python
|
mit
| 1,713
|
import math
from collections import Counter
def euclidean_distance(x1, x2):
"""计算两个样本之间的欧氏距离"""
return math.sqrt(sum([(a - b) **2 for a, b in zip(x1, x2)]))
def knn_predict(X_train, y_train, x_test, k=3):
"""
单个样本的k近邻预测
参数:
X_train: 训练样本特征(列表,每个元素为样本的特征向量)
y_train: 训练样本标签(列表,与X_train一一对应)
x_test: 待预测的测试样本
k: 近邻数量
返回:
预测标签
"""
# 计算测试样本与所有训练样本的距离
distances = []
for i in range(len(X_train)):
dist = euclidean_distance(x_test, X_train[i])
distances.append((dist, y_train[i])) # 存储(距离,标签)元组
# 按距离升序排序,取前k个近邻
distances.sort()
k_neighbors = distances[:k]
# 提取近邻的标签并投票(多数表决)
k_labels = [label for (_, label) in k_neighbors]
most_common = Counter(k_labels).most_common(1) # 取出现次数最多的标签
return most_common[0][0]
def knn_classify(X_train, y_train, X_test, k=3):
"""
批量样本的k近邻分类
参数:
X_train: 训练样本特征
y_train: 训练样本标签
X_test: 测试样本特征(列表,每个元素为待预测样本)
k: 近邻数量
返回:
预测标签列表(与X_test一一对应)
"""
predictions = []
for x in X_test:
pred = knn_predict(X_train, y_train, x, k)
predictions.append(pred)
return predictions
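# --- Added usage sketch (not part of the original file) ---
# Invented data for the batch interface above.
if __name__ == "__main__":
    X_train = [[1, 1], [1, 2], [2, 1], [7, 7], [7, 8], [8, 7]]
    y_train = ['a', 'a', 'a', 'b', 'b', 'b']
    X_test = [[1.5, 1.5], [7.5, 7.5]]
    print(knn_classify(X_train, y_train, X_test, k=3))  # expected: ['a', 'b']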
|
2301_80822435/machine-learning-course
|
assignment4/1班05.py
|
Python
|
mit
| 1,637
|
import math
def euclidean_distance(point1, point2):
return math.sqrt(sum((a - b) ** 2 for a, b in zip(point1, point2)))
def knn_predict(training_data, training_labels, test_point, k=3):
    if len(training_data) != len(training_labels):
        raise ValueError("Training data and labels must have the same length")
    if k > len(training_data):
        raise ValueError("k must not exceed the number of training samples")
distances = []
for i, train_point in enumerate(training_data):
dist = euclidean_distance(train_point, test_point)
distances.append((training_labels[i], dist))
distances.sort(key=lambda x: x[1])
k_nearest = distances[:k]
label_counts = {}
for label, _ in k_nearest:
if label in label_counts:
label_counts[label] += 1
else:
label_counts[label] = 1
max_count = 0
predicted_label = None
for label, count in label_counts.items():
if count > max_count:
max_count = count
predicted_label = label
return predicted_label
def knn_predict_all(training_data, training_labels, test_data, k=3):
predictions = []
for test_point in test_data:
prediction = knn_predict(training_data, training_labels, test_point, k)
predictions.append(prediction)
return predictions
def main():
training_data = [
[1, 2], [1, 4], [2, 1], [2, 3], [3, 2],
[6, 5], [7, 6], [8, 5], [8, 7], [7, 8]
]
training_labels = ['A', 'A', 'A', 'A', 'A',
'B', 'B', 'B', 'B', 'B']
test_points = [
[1.5, 2.5],
[7.5, 6.5],
[4, 4]
]
k = 3
predictions = knn_predict_all(training_data, training_labels, test_points, k)
print("K近邻算法预测结果 (k={}):".format(k))
for i, (point, prediction) in enumerate(zip(test_points, predictions)):
print(f"测试点 {point} -> 预测类别: {prediction}")
if __name__ == "__main__":
main()
|
2301_80822435/machine-learning-course
|
assignment4/1班10.py
|
Python
|
mit
| 2,008
|
import math
from collections import Counter
def euclidean_distance(x1, x2):
    if len(x1) != len(x2):
        raise ValueError("Both samples must have the same dimensionality")
    squared_diff_sum = sum((a - b) ** 2 for a, b in zip(x1, x2))
    return math.sqrt(squared_diff_sum)
def KNN(X_train, y_train, X_test, k=5, task="classification", distance_func=euclidean_distance):
    # 1. Validate the inputs
    if len(X_train) != len(y_train):
        raise ValueError("Training features and labels must have the same length")
    if k < 1 or k > len(X_train):
        raise ValueError(f"k must satisfy 1 <= k <= number of training samples (currently {len(X_train)})")
    if task not in ["classification", "regression"]:
        raise ValueError('task must be "classification" or "regression"')
    is_single_test = not isinstance(X_test[0], (list, tuple))
    if is_single_test:
        X_test = [X_test]
    # 2. Check that all samples share the same dimensionality
    n_features = len(X_train[0]) if X_train else 0
    for sample in X_train + X_test:
        if len(sample) != n_features:
            raise ValueError("All training and test samples must have the same dimensionality")
    # 3. Predict each test sample
predictions = []
for test_sample in X_test:
distances = []
for train_sample, train_label in zip(X_train, y_train):
dist = distance_func(test_sample, train_sample)
distances.append((dist, train_label))
distances.sort(key=lambda x: x[0])
k_neighbors = distances[:k]
k_neighbor_labels = [label for (dist, label) in k_neighbors]
if task == "classification":
vote_result = Counter(k_neighbor_labels).most_common(1)[0][0]
predictions.append(vote_result)
else:
regression_result = sum(k_neighbor_labels) / len(k_neighbor_labels)
predictions.append(regression_result)
return predictions[0] if is_single_test else predictions
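# --- Added usage sketch (not part of the original file) ---
# Hypothetical demo of the KNN function above in both task modes; data is illustrative.
if __name__ == "__main__":
    X_train = [[1, 1], [1, 2], [6, 6], [7, 6]]
    print(KNN(X_train, ['A', 'A', 'B', 'B'], [2, 1], k=3))         # single sample -> 'A'
    print(KNN(X_train, [1.0, 2.0, 6.0, 7.0], [[6, 7]], k=2,
              task="regression"))                                  # mean of 2 neighbors -> [6.5]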
|
2301_80822435/machine-learning-course
|
assignment4/1班13.py
|
Python
|
mit
| 1,966
|
import math
from collections import Counter
def euclidean_distance(x1, x2):
"""计算两个样本之间的欧氏距离"""
return math.sqrt(sum([(a - b) **2 for a, b in zip(x1, x2)]))
def knn_predict(train_data, train_labels, x, k):
"""
单个样本的k近邻预测
train_data: 训练样本列表(每个样本为可迭代对象)
train_labels: 训练样本对应的标签列表
x: 待预测样本
k: 近邻数量
return: 预测标签
"""
# 计算待预测样本与所有训练样本的距离
distances = []
for i, sample in enumerate(train_data):
dist = euclidean_distance(sample, x)
distances.append((dist, train_labels[i])) # (距离, 标签)
# 按距离排序,取前k个最近邻
distances.sort() # 按距离升序排列
k_nearest = distances[:k]
# 提取近邻的标签并投票(多数表决)
k_labels = [label for (dist, label) in k_nearest]
most_common = Counter(k_labels).most_common(1) # 取出现次数最多的标签
return most_common[0][0]
def knn_classify(train_data, train_labels, test_data, k=3):
"""
k近邻分类器
train_data: 训练样本列表
train_labels: 训练样本标签列表
test_data: 测试样本列表(待预测)
k: 近邻数量
return: 测试样本的预测标签列表
"""
# 输入校验
if len(train_data) != len(train_labels):
raise ValueError("训练样本与标签数量不匹配")
if k <= 0 or k > len(train_data):
raise ValueError("k值必须为正整数且不大于训练样本数量")
# 对每个测试样本进行预测
predictions = []
for x in test_data:
pred = knn_predict(train_data, train_labels, x, k)
predictions.append(pred)
return predictions
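# --- Added usage sketch (not part of the original file) ---
# Hypothetical smoke test for knn_classify; data values are illustrative only.
if __name__ == "__main__":
    train_data = [[0, 0], [0, 1], [5, 5], [5, 6]]
    train_labels = ['low', 'low', 'high', 'high']
    print(knn_classify(train_data, train_labels, [[0.5, 0.5], [5.5, 5.5]], k=3))
    # expected: ['low', 'high']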
|
2301_80822435/machine-learning-course
|
assignment4/1班18.py
|
Python
|
mit
| 1,814
|
import math
from collections import Counter
def _distance(x1, x2):
"""计算两个样本的欧氏距离(内部辅助函数)"""
return math.sqrt(sum((a - b) **2 for a, b in zip(x1, x2)))
def knn_predict(sample, train_data, train_labels, k):
"""
单个样本的k近邻预测
参数:
sample: 待预测样本(如[1.2, 3.4])
train_data: 训练样本列表(每个元素为样本)
train_labels: 训练样本对应的标签列表
k: 近邻数量(正整数)
返回:
预测标签
"""
# 计算待预测样本与所有训练样本的距离
distances = []
for i in range(len(train_data)):
dist = _distance(sample, train_data[i])
distances.append((dist, train_labels[i])) # 存储(距离,标签)元组
# 按距离升序排序,取前k个近邻
distances.sort()
k_nearest_labels = [label for (_, label) in distances[:k]]
# 多数表决:返回出现次数最多的标签
return Counter(k_nearest_labels).most_common(1)[0][0]
def knn_classifier(test_data, train_data, train_labels, k=3):
"""
k近邻分类器(批量预测)
参数:
test_data: 测试样本列表
train_data: 训练样本列表
train_labels: 训练样本标签列表
k: 近邻数量(默认3)
返回:
测试样本的预测标签列表
"""
# 输入合法性检查
if len(train_data) != len(train_labels):
raise ValueError("训练数据与标签数量不匹配")
if k <= 0 or k > len(train_data):
raise ValueError(f"k值必须为(0, {len(train_data)}]之间的整数")
if not train_data:
raise ValueError("训练数据不能为空")
# 对每个测试样本进行预测
predictions = [knn_predict(sample, train_data, train_labels, k)
for sample in test_data]
return predictions
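# --- Added usage sketch (not part of the original file) ---
# Hypothetical quick check of knn_classifier; data values are illustrative only.
if __name__ == "__main__":
    train_data = [[1, 1], [1, 2], [8, 8], [8, 9]]
    train_labels = ['near', 'near', 'far', 'far']
    print(knn_classifier([[1.5, 1.5], [8.5, 8.5]], train_data, train_labels, k=3))
    # expected: ['near', 'far']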
|
2301_80822435/machine-learning-course
|
assignment4/1班22.py
|
Python
|
mit
| 1,919
|
import math
def euclidean_distance(point1, point2):
"""手动计算欧几里得距离"""
if len(point1) != len(point2):
raise ValueError("点的维度不一致")
squared_sum = 0
for i in range(len(point1)):
squared_sum += (point1[i] - point2[i]) ** 2
return math.sqrt(squared_sum)
class KNN:
def __init__(self, k, label_num):
self.k = k
self.label_num = label_num
def fit(self, x_train, y_train):
self.x_train = x_train
self.y_train = y_train
def get_knn_indices(self, x):
"""手动实现获取K近邻索引"""
# 计算所有距离
distances = []
for i, train_point in enumerate(self.x_train):
dist = euclidean_distance(train_point, x)
distances.append((dist, i))
        # Sort by hand (a simple selection-style exchange sort)
for i in range(len(distances)):
for j in range(i + 1, len(distances)):
if distances[j][0] < distances[i][0]:
distances[i], distances[j] = distances[j], distances[i]
        # Take the first k indices
knn_indices = [idx for _, idx in distances[:self.k]]
return knn_indices
def get_label(self, x):
"""手动实现类别预测"""
knn_indices = self.get_knn_indices(x)
# 手动统计类别
label_statistic = [0] * self.label_num
for index in knn_indices:
label = self.y_train[index]
label_statistic[label] += 1
        # Find the maximum by hand
max_count = -1
best_label = -1
for label, count in enumerate(label_statistic):
if count > max_count:
max_count = count
best_label = label
return best_label
def predict(self, x_test):
"""手动实现批量预测"""
predicted_labels = []
for x in x_test:
predicted_labels.append(self.get_label(x))
return predicted_labels
def score(self, x_test, y_test):
"""手动计算准确率"""
predictions = self.predict(x_test)
correct = 0
for pred, true in zip(predictions, y_test):
if pred == true:
correct += 1
return correct / len(y_test)
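# --- Added usage sketch (not part of the original file) ---
# Hypothetical demo: this class expects integer labels in [0, label_num).
# All data values below are illustrative only.
if __name__ == "__main__":
    x_train = [[1, 1], [2, 1], [7, 7], [8, 7]]
    y_train = [0, 0, 1, 1]
    knn = KNN(k=3, label_num=2)
    knn.fit(x_train, y_train)
    print(knn.predict([[1.5, 1.0], [7.5, 7.0]]))  # expected: [0, 1]
    print(knn.score([[2, 2], [7, 8]], [0, 1]))    # expected: 1.0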
|
2301_80822435/machine-learning-course
|
assignment4/1班23.py
|
Python
|
mit
| 2,293
|
import numpy as np
from sklearn.model_selection import KFold
def distance(a, b, metric='euclidean'):
if metric == 'euclidean':
return np.sqrt(np.sum((a - b) ** 2))
elif metric == 'manhattan':
return np.sum(np.abs(a - b))
else:
raise ValueError(f"不支持的距离度量: {metric}。请使用 'euclidean' 或 'manhattan'。")
class KNN:
def __init__(self, k=3, label_num=None, metric='euclidean', weights='uniform'):
"
self.k = k
self.label_num = label_num
self.metric = metric
self.weights = weights
self.x_train = None
self.y_train = None
self.best_k = None # 用于存储通过交叉验证找到的最佳k值
def fit(self, x_train, y_train):
self.x_train = x_train
self.y_train = y_train
if self.label_num is None:
self.label_num = len(np.unique(y_train))
print(f"自动检测到类别数量: {self.label_num}")
def _get_weights(self, distances):
"""
根据距离计算权重 (内部辅助函数)。
"""
if self.weights == 'uniform':
return np.ones_like(distances)
elif self.weights == 'distance':
# 避免除以零
return 1.0 / (distances + 1e-10)
else:
raise ValueError(f"不支持的权重方式: {self.weights}。请使用 'uniform' 或 'distance'。")
def get_knn_info(self, x):
"""
获取单个测试样本的k个最近邻的索引和距离。
"""
dis = np.array([distance(a, x, self.metric) for a in self.x_train])
sorted_indices = np.argsort(dis)
knn_indices = sorted_indices[:self.k]
knn_distances = dis[knn_indices]
return knn_indices, knn_distances
def get_label(self, x):
"""
对单个测试样本进行预测。
"""
knn_indices, knn_distances = self.get_knn_info(x)
weights = self._get_weights(knn_distances)
label_statistic = np.zeros(shape=[self.label_num])
for i, index in enumerate(knn_indices):
label = int(self.y_train[index])
label_statistic[label] += weights[i]
return np.argmax(label_statistic)
def predict(self, x_test):
"""
对多个测试样本进行预测。
"""
if self.x_train is None:
raise ValueError("模型尚未训练,请先调用 fit() 方法。")
predicted_test_labels = np.zeros(shape=[len(x_test)], dtype=int)
for i, x in enumerate(x_test):
predicted_test_labels[i] = self.get_label(x)
return predicted_test_labels
def score(self, x_test, y_test):
"""
计算模型在给定测试集上的准确率。
"""
y_pred = self.predict(x_test)
return np.mean(y_pred == y_test)
def find_best_k(self, k_range, x_val, y_val, cv=5):
"""
通过交叉验证在给定的范围内寻找最佳的k值。
参数:
k_range -- k值的搜索范围, 例如 range(1, 31)
x_val -- 用于交叉验证的特征数据
y_val -- 用于交叉验证的标签数据
cv -- 交叉验证的折数
"""
kf = KFold(n_splits=cv, shuffle=True, random_state=42)
best_k = None
best_score = 0
print(f"开始交叉验证寻找最佳k值 (范围: {list(k_range)})...")
for k in k_range:
self.k = k
fold_scores = []
for fold, (train_idx, val_idx) in enumerate(kf.split(x_val)):
x_fold_train, x_fold_val = x_val[train_idx], x_val[val_idx]
y_fold_train, y_fold_val = y_val[train_idx], y_val[val_idx]
self.fit(x_fold_train, y_fold_train)
fold_score = self.score(x_fold_val, y_fold_val)
fold_scores.append(fold_score)
mean_score = np.mean(fold_scores)
print(f"k={k}, 平均准确率: {mean_score:.4f}")
if mean_score > best_score:
best_score = mean_score
best_k = k
self.best_k = best_k
self.k = best_k # 将模型的k值设置为最佳k
print(f"\n找到的最佳k值为: {best_k} (准确率: {best_score:.4f})")
return best_k, best_score
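# --- Added usage sketch (not part of the original file) ---
# Hypothetical demo with NumPy arrays and integer labels; data values are
# illustrative only. Note that find_best_k mutates the model's k in place.
if __name__ == "__main__":
    x = np.array([[1.0, 1.0], [1.2, 0.8], [6.0, 6.0], [6.2, 5.8]] * 5)
    y = np.array([0, 0, 1, 1] * 5)
    model = KNN(k=3, weights='distance')
    model.fit(x, y)
    print(model.predict(np.array([[1.1, 0.9], [6.1, 5.9]])))  # expected: [0 1]
    model.find_best_k(range(1, 6), x, y, cv=5)                # tunes model.k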
|
2301_80822435/machine-learning-course
|
assignment4/1班28.py
|
Python
|
mit
| 4,421
|
import numpy as np
import matplotlib.pyplot as plt
# k-nearest-neighbor classifier class (the original uses pinyin identifiers)
class KJinLin:
    def __init__(self, k=3):
        self.k = k  # number of neighbors
        self.shu_ju = None  # training data
        self.biao_qian = None  # training labels
    # "Training" just stores the data
    def xun_lian(self, xun_lian_shu_ju, xun_lian_biao_qian):
        self.shu_ju = xun_lian_shu_ju
        self.biao_qian = xun_lian_biao_qian
    # Prediction
    def yu_ce(self, ce_shi_shu_ju):
        yu_ce_jie_guo = []
        # Predict each test point in turn
        for ce_shi_dian in ce_shi_shu_ju:
            # Distance from this test point to every training point
            ju_li_list = []
            for xun_lian_dian in self.shu_ju:
                # Euclidean distance, computed step by step
                ju_li = 0
                for i in range(len(ce_shi_dian)):
                    ju_li = ju_li + (ce_shi_dian[i] - xun_lian_dian[i]) ** 2
                ju_li = np.sqrt(ju_li)
                ju_li_list.append(ju_li)
            # Indices of the k nearest neighbors
            zui_jin_de = np.argsort(ju_li_list)[:self.k]
            # Count the labels of these k neighbors
            biao_qian_tong_ji = {}
            for idx in zui_jin_de:
                bq = self.biao_qian[idx]
                if bq in biao_qian_tong_ji:
                    biao_qian_tong_ji[bq] = biao_qian_tong_ji[bq] + 1
                else:
                    biao_qian_tong_ji[bq] = 1
            # Pick the most frequent label
            zui_duo = -1
            zui_hou_bq = 0
            for key, value in biao_qian_tong_ji.items():
                if value > zui_duo:
                    zui_duo = value
                    zui_hou_bq = key
            yu_ce_jie_guo.append(zui_hou_bq)
        return np.array(yu_ce_jie_guo)
# Quick test
if __name__ == "__main__":
    # Generate simple 2-D data in two classes
    def zao_shu_ju():
        # Class 0
        lei0 = np.random.normal(2, 1, (50, 2))
        bq0 = np.zeros(50)
        # Class 1
        lei1 = np.random.normal(7, 1, (50, 2))
        bq1 = np.ones(50)
        # Stack them together
        all_shu_ju = np.vstack((lei0, lei1))
        all_biao_qian = np.hstack((bq0, bq1))
        return all_shu_ju, all_biao_qian
    # Generate training data
    xun_lian_shu_ju, xun_lian_biao_qian = zao_shu_ju()
    # Build a model with k=5
    model = KJinLin(k=5)
    model.xun_lian(xun_lian_shu_ju, xun_lian_biao_qian)
    # A few test points
    ce_shi_shu_ju = np.array([[2, 2], [3, 3], [6, 7], [8, 8], [5, 5]])
    # Predict
    ce_shi_jie_guo = model.yu_ce(ce_shi_shu_ju)
    print("Predictions for the test points:", ce_shi_jie_guo)
    # Plot
    plt.scatter(xun_lian_shu_ju[:, 0], xun_lian_shu_ju[:, 1], c=xun_lian_biao_qian, cmap='coolwarm', s=30, label='training data')
    plt.scatter(ce_shi_shu_ju[:, 0], ce_shi_shu_ju[:, 1], c=ce_shi_jie_guo, cmap='coolwarm', s=100, marker='*', label='test data')
    plt.title('k-NN classification result')
    plt.legend()
    plt.show()
|
2301_80822435/machine-learning-course
|
assignment4/1班29.py
|
Python
|
mit
| 3,072
|
import math
from collections import Counter
from operator import itemgetter
class KNN:
def __init__(self, k=3, task='classification'):
self.k = k
self.task = task
self.X_train = None
self.y_train = None
def fit(self, X, y):
self.X_train = X
self.y_train = y
def _euclidean_distance(self, a, b):
        # Euclidean distance between two vectors
        return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
    def _get_k_neighbors(self, x):
        # Find the k neighbors closest to the target sample
        distances = []
        # Distance to every training sample
        for i, train_sample in enumerate(self.X_train):
            dist = self._euclidean_distance(x, train_sample)
            distances.append((i, dist))
        # Sort and keep the k nearest
        distances.sort(key=itemgetter(1))
        neighbors = distances[:self.k]
        return neighbors
    def predict(self, X):
        # Predict labels for new samples (classification or regression)
        predictions = []
        for sample in X:
            neighbors = self._get_k_neighbors(sample)
            if self.task == 'classification':
                # Majority vote
                neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
                most_common = Counter(neighbor_labels).most_common(1)[0][0]
                predictions.append(most_common)
            elif self.task == 'regression':
                # Mean of the neighbors' values
                neighbor_values = [self.y_train[idx] for idx, _ in neighbors]
                predictions.append(sum(neighbor_values) / len(neighbor_values))
        return predictions
    def predict_proba(self, X):
        # Per-class probabilities for each sample (classification only)
        if self.task != 'classification':
            raise ValueError("predict_proba is only available for classification")
        probabilities = []
        for sample in X:
            neighbors = self._get_k_neighbors(sample)
            neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
            total = len(neighbors)
            counts = Counter(neighbor_labels)
            # Build the per-class probability dict
prob_dict = {}
classes = set(self.y_train)
for c in classes:
prob_dict[c] = counts.get(c, 0) / total
probabilities.append(prob_dict)
return probabilities
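# --- Added usage sketch (not part of the original file) ---
# Hypothetical demo of both task modes; data values are illustrative only.
if __name__ == "__main__":
    clf = KNN(k=3, task='classification')
    clf.fit([[0, 0], [0, 1], [5, 5], [5, 6]], ['a', 'a', 'b', 'b'])
    print(clf.predict([[0.5, 0.5]]))        # expected: ['a']
    print(clf.predict_proba([[0.5, 0.5]]))  # e.g. [{'a': 2/3, 'b': 1/3}]
    reg = KNN(k=2, task='regression')
    reg.fit([[1], [2], [3], [4]], [10.0, 20.0, 30.0, 40.0])
    print(reg.predict([[2.5]]))             # expected: [25.0]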
|
2301_80822435/machine-learning-course
|
assignment4/1班30.py
|
Python
|
mit
| 2,424
|
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns
class KNN:
def __init__(self, k=3, distance_metric='euclidean', weights='uniform'):
"""
        k-nearest-neighbor implementation.
        Args:
            k: number of neighbors
            distance_metric: distance measure ('euclidean', 'manhattan', 'minkowski')
            weights: weighting scheme ('uniform', 'distance')
"""
self.k = k
self.distance_metric = distance_metric
self.weights = weights
self.X_train = None
self.y_train = None
def _calculate_distance(self, x1, x2):
"""计算两个样本点之间的距离"""
if self.distance_metric == 'euclidean':
return np.sqrt(np.sum((x1 - x2) ** 2))
elif self.distance_metric == 'manhattan':
return np.sum(np.abs(x1 - x2))
elif self.distance_metric == 'minkowski':
            # Using p=3 here as an example
return np.sum(np.abs(x1 - x2) ** 3) ** (1/3)
else:
raise ValueError("不支持的距離度量方法")
def _get_neighbors(self, x):
"""获取测试样本x的k个最近邻居"""
distances = []
# 计算与所有训练样本的距离
for i, train_sample in enumerate(self.X_train):
dist = self._calculate_distance(x, train_sample)
distances.append((i, dist))
        # Sort by distance and select the first k
distances.sort(key=lambda x: x[1])
neighbors = distances[:self.k]
return neighbors
def _predict_single(self, x):
"""预测单个样本的类别"""
# 获取k个最近邻居
neighbors = self._get_neighbors(x)
if self.weights == 'uniform':
# 均匀权重 - 简单多数投票
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
most_common = Counter(neighbor_labels).most_common(1)
return most_common[0][0]
elif self.weights == 'distance':
            # Distance-weighted voting
votes = {}
for idx, dist in neighbors:
label = self.y_train[idx]
                # Guard against division by zero
if dist == 0:
weight = float('inf')
else:
weight = 1 / dist
if label in votes:
votes[label] += weight
else:
votes[label] = weight
            # Return the class with the largest total weight
return max(votes.items(), key=lambda x: x[1])[0]
def fit(self, X, y):
"""
        Fit the kNN model (this just stores the data).
        Args:
            X: training features, shape (n_samples, n_features)
            y: training labels, shape (n_samples,)
"""
self.X_train = np.array(X)
self.y_train = np.array(y)
return self
def predict(self, X):
"""
        Predict class labels.
        Args:
            X: test features, shape (n_samples, n_features)
        Returns:
            predictions: predicted labels, shape (n_samples,)
"""
X = np.array(X)
predictions = []
for x in X:
pred = self._predict_single(x)
predictions.append(pred)
return np.array(predictions)
def predict_proba(self, X):
"""
        Predict per-class probabilities for each sample.
        Args:
            X: test features, shape (n_samples, n_features)
        Returns:
            probabilities: probability matrix, shape (n_samples, n_classes)
"""
X = np.array(X)
n_samples = X.shape[0]
classes = np.unique(self.y_train)
n_classes = len(classes)
probabilities = np.zeros((n_samples, n_classes))
for i, x in enumerate(X):
neighbors = self._get_neighbors(x)
neighbor_labels = [self.y_train[idx] for idx, _ in neighbors]
if self.weights == 'uniform':
                # Fraction of neighbors belonging to each class
for j, cls in enumerate(classes):
probabilities[i, j] = neighbor_labels.count(cls) / self.k
elif self.weights == 'distance':
                # Distance-weighted probabilities
total_weight = 0
class_weights = {cls: 0 for cls in classes}
for idx, dist in neighbors:
label = self.y_train[idx]
weight = 1 / dist if dist != 0 else float('inf')
class_weights[label] += weight
total_weight += weight
for j, cls in enumerate(classes):
probabilities[i, j] = class_weights[cls] / total_weight
return probabilities
# Test and visualization helpers
def test_knn():
    """Test the kNN algorithm and visualize the results"""
    # Generate classification data
    X, y = make_classification(n_samples=300, n_features=2, n_redundant=0,
                               n_informative=2, n_clusters_per_class=1,
                               n_classes=3, random_state=42)
    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                        random_state=42)
    # Create and fit the kNN model
    knn = KNN(k=5, weights='distance')
    knn.fit(X_train, y_train)
    # Predict
    y_pred = knn.predict(X_test)
    y_proba = knn.predict_proba(X_test)
    # Accuracy
    accuracy = accuracy_score(y_test, y_pred)
    print(f"Test-set accuracy: {accuracy:.4f}")
    print("\nClassification report:")
    print(classification_report(y_test, y_pred))
    # Visualize the results
    plt.figure(figsize=(18, 5))
    # 1. Training data distribution
    plt.subplot(1, 3, 1)
    scatter = plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='viridis',
                          s=50, alpha=0.7, edgecolors='k')
    plt.colorbar(scatter)
    plt.title('Training data distribution')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    # 2. Test-set predictions
    plt.subplot(1, 3, 2)
    scatter = plt.scatter(X_test[:, 0], X_test[:, 1], c=y_pred, cmap='viridis',
                          s=50, alpha=0.7, edgecolors='k')
    plt.colorbar(scatter)
    plt.title(f'Test-set predictions (accuracy: {accuracy:.3f})')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    # 3. Confusion matrix
    plt.subplot(1, 3, 3)
    cm = confusion_matrix(y_test, y_pred)
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('Confusion matrix')
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
plt.tight_layout()
plt.show()
return knn, X_test, y_test, y_pred
def find_best_k(X_train, X_test, y_train, y_test, k_range=range(1, 16)):
"""寻找最佳的k值"""
accuracies = []
for k in k_range:
knn = KNN(k=k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
accuracies.append(accuracy)
# 绘制准确率随k值变化的曲线
plt.figure(figsize=(10, 6))
plt.plot(k_range, accuracies, 'bo-', linewidth=2, markersize=8)
plt.xlabel('k值')
plt.ylabel('准确率')
plt.title('k值选择对准确率的影响')
plt.grid(True, alpha=0.3)
best_k = k_range[np.argmax(accuracies)]
best_accuracy = max(accuracies)
plt.axvline(x=best_k, color='red', linestyle='--',
label=f'最佳k值: {best_k}, 准确率: {best_accuracy:.3f}')
plt.legend()
plt.show()
print(f"最佳k值: {best_k}, 对应准确率: {best_accuracy:.4f}")
return best_k, best_accuracy
def demo_different_metrics():
"""比较不同距离度量的效果"""
# 生成数据
X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
n_informative=2, n_classes=2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
metrics = ['euclidean', 'manhattan']
results = {}
plt.figure(figsize=(15, 5))
for i, metric in enumerate(metrics):
knn = KNN(k=5, distance_metric=metric)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
results[metric] = accuracy
# 绘制决策边界
plt.subplot(1, 3, i+1)
# 创建网格点
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
# 预测网格点
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# 绘制决策边界
plt.contourf(xx, yy, Z, alpha=0.4, cmap='viridis')
scatter = plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test,
cmap='viridis', s=50, edgecolors='k')
plt.title(f'{metric}距离 (准确率: {accuracy:.3f})')
plt.xlabel('特征 1')
plt.ylabel('特征 2')
# 比较结果
plt.subplot(1, 3, 3)
metrics_names = list(results.keys())
acc_values = list(results.values())
bars = plt.bar(metrics_names, acc_values, color=['skyblue', 'lightcoral'])
plt.title('不同距离度量的准确率比较')
plt.ylabel('准确率')
# 在柱状图上显示数值
for bar, acc in zip(bars, acc_values):
plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01,
f'{acc:.3f}', ha='center', va='bottom')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
print("=== kNN算法实现与测试 ===\n")
# 基本测试
print("1. 基本kNN测试:")
knn_model, X_test, y_test, y_pred = test_knn()
# 寻找最佳k值
print("\n2. 寻找最佳k值:")
X, y = make_classification(n_samples=300, n_features=2, n_classes=3,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
find_best_k(X_train, X_test, y_train, y_test)
# 比较不同距离度量
print("\n3. 不同距离度量比较:")
demo_different_metrics()
# 演示概率预测
print("\n4. 概率预测示例:")
sample_proba = knn_model.predict_proba(X_test[:3])
print(f"前3个测试样本的类别概率:\n{sample_proba}")
|
2301_80822435/machine-learning-course
|
assignment4/1班31.py
|
Python
|
mit
| 11,100
|
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
class kNN:
def __init__(self, k=3):
self.k = k
self.X_train = None
self.y_train = None
def fit(self, X, y):
"""训练kNN模型(只是存储数据)"""
self.X_train = X
self.y_train = y
return self
def predict(self, X):
"""预测新样本的类别"""
predictions = [self._predict(x) for x in X]
return np.array(predictions)
def _predict(self, x):
"""预测单个样本的类别"""
# 计算距离
distances = np.linalg.norm(self.X_train - x, axis=1)
# 获取最近的k个邻居的索引
k_indices = np.argsort(distances)[:self.k]
# 获取k个邻居的标签
k_nearest_labels = self.y_train[k_indices]
# 投票决定类别
most_common = Counter(k_nearest_labels).most_common(1)
return most_common[0][0]
# 测试示例
if __name__ == "__main__":
# 生成分类数据
X, y = make_classification(n_samples=200, n_features=2, n_redundant=0,
n_informative=2, n_clusters_per_class=1,
random_state=42)
# 划分训练测试集
split = 150
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
# 训练kNN模型
knn = kNN(k=5)
knn.fit(X_train, y_train)
# 预测
y_pred = knn.predict(X_test)
# 计算准确率
accuracy = np.mean(y_pred == y_test)
print(f"测试集准确率: {accuracy:.3f}")
# 可视化结果
plt.figure(figsize=(12, 5))
# 训练数据
plt.subplot(131)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap='viridis', alpha=0.7)
plt.title("训练数据")
# 测试数据真实标签
plt.subplot(132)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap='viridis', alpha=0.7)
plt.title("测试数据真实标签")
# 测试数据预测标签
plt.subplot(133)
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_pred, cmap='viridis', alpha=0.7)
plt.title("测试数据预测标签")
plt.tight_layout()
plt.show()
# 演示单个预测过程
print("\n单个样本预测演示:")
test_sample = X_test[0]
print(f"测试样本: {test_sample}")
print(f"真实标签: {y_test[0]}")
print(f"预测标签: {y_pred[0]}")
|
2301_80822435/machine-learning-course
|
assignment4/1班32.py
|
Python
|
mit
| 2,547
|
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.datasets import load_iris
class KNN:
    def __init__(self, k=3):
        self.k = k
        self.X_train = None
        self.y_train = None
    def fit(self, X, y):
        """Store the training data"""
        self.X_train = X
        self.y_train = y
        return self
    def euclidean_distance(self, x1, x2):
        """Compute the Euclidean distance"""
        return np.sqrt(np.sum((x1 - x2) ** 2))
    def predict(self, X):
        """Predict the classes of new data"""
        predictions = [self._predict(x) for x in X]
        return np.array(predictions)
    def _predict(self, x):
        """Predict the class of a single sample"""
        # Distances to all training samples
        distances = [self.euclidean_distance(x, x_train)
                     for x_train in self.X_train]
        # Indices of the k nearest neighbors
        k_indices = np.argsort(distances)[:self.k]
        # Labels of the k neighbors
        k_nearest_labels = [self.y_train[i] for i in k_indices]
        # Return the most common label
        most_common = Counter(k_nearest_labels).most_common(1)
        return most_common[0][0]
    def predict_proba(self, X):
        """Predict class probabilities"""
        proba = []
        for x in X:
            distances = [self.euclidean_distance(x, x_train)
                         for x_train in self.X_train]
            k_indices = np.argsort(distances)[:self.k]
            k_nearest_labels = [self.y_train[i] for i in k_indices]
            # Probability of each class among the neighbors
            counter = Counter(k_nearest_labels)
            proba_dict = {}
            total = len(k_nearest_labels)
            for label in set(self.y_train):
                proba_dict[label] = counter.get(label, 0) / total
            proba.append(proba_dict)
        return proba
# Test the KNN algorithm
def test_knn():
    # Load the iris dataset
    iris = load_iris()
    X, y = iris.data, iris.target
    # Split into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y
    )
    # Apply KNN
    knn = KNN(k=3)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    # Evaluate the model
    accuracy = accuracy_score(y_test, y_pred)
    print(f"KNN accuracy: {accuracy:.4f}")
    print("\nClassification report:")
    print(classification_report(y_test, y_pred,
                                target_names=iris.target_names))
    # Visualize feature "importance" (based on feature spread)
    feature_importance = np.std(X_train, axis=0)
    plt.figure(figsize=(10, 6))
    plt.bar(range(len(feature_importance)), feature_importance)
    plt.xticks(range(len(feature_importance)), iris.feature_names)
    plt.title('Feature importance (by standard deviation)')
    plt.ylabel('Standard deviation')
    plt.show()
    return knn, accuracy
# Run the KNN test
knn_model, accuracy = test_knn()
|
2301_80822435/machine-learning-course
|
assignment4/1班34.py
|
Python
|
mit
| 3,094
|
import math
import random
from collections import Counter
def euclidean_distance(x1, x2):
    if len(x1) != len(x2):
        raise ValueError("Both samples must have the same number of features")
dist_sq = sum((a - b) ** 2 for a, b in zip(x1, x2))
return math.sqrt(dist_sq)
def knn_predict(x_test, X_train, y_train, k=3, task="classification"):
n_train = len(X_train)
    if n_train == 0:
        raise ValueError("The training set must not be empty")
    if len(y_train) != n_train:
        raise ValueError("Training features and labels must have the same length")
    if k <= 0 or k > n_train:
        raise ValueError(f"k must satisfy 0 < k <= number of training samples (currently {n_train})")
    if task not in ["classification", "regression"]:
        raise ValueError("task must be 'classification' or 'regression'")
distances = []
for i in range(n_train):
x_train = X_train[i]
y_train_i = y_train[i]
dist = euclidean_distance(x_test, x_train)
distances.append((dist, y_train_i))
distances.sort(key=lambda x: x[0])
k_neighbors = distances[:k]
k_labels = [neighbor[1] for neighbor in k_neighbors]
if task == "classification":
vote_result = Counter(k_labels).most_common(1)[0][0]
return vote_result
else:
regression_result = sum(k_labels) / len(k_labels)
return regression_result
def KNN(X_test, X_train, y_train, k=3, task="classification"):
    if len(X_test) == 0:
        raise ValueError("The test set must not be empty")
predictions = [knn_predict(x, X_train, y_train, k, task) for x in X_test]
return predictions
if __name__ == "__main__":
print("=" * 50)
print("测试1:KNN分类任务(鸢尾花简化数据集)")
print("=" * 50)
X_train_classify = [
[1.4, 0.2], [1.3, 0.2], [1.5, 0.2], [1.4, 0.3], [1.6, 0.2],
[4.5, 1.5], [4.2, 1.3], [4.3, 1.3], [4.4, 1.2], [4.1, 1.1]
]
y_train_classify = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_test_classify = [[1.4, 0.25], [4.3, 1.4], [2.0, 0.5], [4.0, 1.0]]
y_true_classify = [0, 1, 0, 1]
k = 3
predictions_classify = KNN(X_test_classify, X_train_classify, y_train_classify, k=k, task="classification")
print(f"训练集:{len(X_train_classify)}个样本,k={k}")
print(f"测试集样本:{X_test_classify}")
print(f"真实标签:{y_true_classify}")
print(f"预测标签:{predictions_classify}")
correct = sum([1 for p, t in zip(predictions_classify, y_true_classify) if p == t])
accuracy = correct / len(X_test_classify)
print(f"分类准确率:{accuracy:.2f}")
print("\n" + "=" * 50)
print("测试2:KNN回归任务(房价预测简化数据集)")
print("=" * 50)
X_train_regress = [
[50, 1], [60, 2], [70, 2], [80, 3], [90, 3],
[100, 4], [110, 4], [120, 5], [130, 5], [140, 6]
]
y_train_regress = [80, 95, 110, 125, 140, 155, 170, 185, 200, 215]
X_test_regress = [[65, 2], [95, 3], [115, 4]]
y_true_regress = [102.5, 147.5, 177.5]
predictions_regress = KNN(X_test_regress, X_train_regress, y_train_regress, k=k, task="regression")
print(f"训练集:{len(X_train_regress)}个样本,k={k}")
print(f"测试集样本:{X_test_regress}")
print(f"真实房价:{y_true_regress}")
print(f"预测房价:{[round(p, 2) for p in predictions_regress]}")
mse = sum([(p - t) ** 2 for p, t in zip(predictions_regress, y_true_regress)]) / len(X_test_regress)
print(f"回归MSE(越小越好):{mse:.2f}")
|
2301_80822435/machine-learning-course
|
assignment4/2班37.py
|
Python
|
mit
| 3,511
|
import random
import math
from collections import Counter
class KNN:
def __init__(self, k=3):
if k <= 0:
raise ValueError("K must be a positive integer.")
self.k = k
self.x_train = None
self.y_train = None
def _euclidean_distance(self, p1, p2):
return math.sqrt(sum((a - b) ** 2 for a, b in zip(p1, p2)))
def fit(self, x_train, y_train):
if len(x_train) != len(y_train):
raise ValueError("训练数据和标签的长度必须相同。")
self.x_train = x_train
self.y_train = y_train
print(f"模型已训练,训练集大小: {len(self.x_train)}")
def _get_knn_indices(self, test_point):
distances = [self._euclidean_distance(test_point, train_point) for train_point in self.x_train]
sorted_indices = sorted(range(len(distances)), key=lambda i: distances[i])
return sorted_indices[:self.k]
def _get_label(self, test_point):
knn_indices = self._get_knn_indices(test_point)
neighbor_labels = [self.y_train[i] for i in knn_indices]
most_common_label = Counter(neighbor_labels).most_common(1)[0][0]
return most_common_label
def predict(self, x_test):
        if not self.x_train or not self.y_train:
            raise RuntimeError("Model is not fitted yet; call fit() first.")
predicted_labels = [self._get_label(point) for point in x_test]
return predicted_labels
if __name__ == "__main__":
train_data = [
[158, 50], [160, 52], [165, 55], [168, 58],
[170, 65], [172, 68], [175, 70], [178, 75],
[180, 85], [182, 88], [185, 90], [188, 95]
]
    train_labels = ['thin', 'thin', 'thin', 'thin', 'normal', 'normal', 'normal', 'normal', 'heavy', 'heavy', 'heavy', 'heavy']
test_points = [
[162, 53],
[174, 72],
[183, 92]
]
print("--- 使用 K=3 进行预测 ---")
knn_classifier = KNN(k=3)
knn_classifier.fit(train_data, train_labels)
predictions = knn_classifier.predict(test_points)
for i, point in enumerate(test_points):
print(f"测试点 {i + 1}: 特征={point} -> 预测结果: '{predictions[i]}'")
print("\n--- 评估不同K值的效果 ---")
random.seed(0)
all_indices = list(range(len(train_data)))
random.shuffle(all_indices)
eval_split = int(len(train_data) * 0.7)
eval_x_train = [train_data[i] for i in all_indices[:eval_split]]
eval_y_train = [train_labels[i] for i in all_indices[:eval_split]]
eval_x_test = [train_data[i] for i in all_indices[eval_split:]]
eval_y_test = [train_labels[i] for i in all_indices[eval_split:]]
for k in range(1, 6):
knn_eval = KNN(k=k)
knn_eval.fit(eval_x_train, eval_y_train)
eval_predictions = knn_eval.predict(eval_x_test)
correct = sum(1 for true, pred in zip(eval_y_test, eval_predictions) if true == pred)
accuracy = correct / len(eval_y_test)
        print(f'K = {k}, prediction accuracy {accuracy * 100:.1f}%')
|
2301_80822435/machine-learning-course
|
assignment4/2班39.py
|
Python
|
mit
| 3,035
|
import math
from collections import Counter
def knn_predict(train_data, train_labels, test_point, k=3):
    if len(train_data) != len(train_labels):
        raise ValueError("Number of training samples and labels must match")
    if k <= 0:
        raise ValueError("k must be greater than 0")
all_distances_info = []
for i, point in enumerate(train_data):
dist = math.sqrt(sum((test_point[j] - point[j])**2 for j in range(len(test_point))))
all_distances_info.append((dist, point, train_labels[i]))
all_distances_info.sort(key=lambda x: x[0])
k_nearest_neighbors_info = all_distances_info[:k]
neighbor_labels = [label for _, _, label in k_nearest_neighbors_info]
vote_counts = Counter(neighbor_labels)
prediction = vote_counts.most_common(1)[0][0]
return prediction, all_distances_info
if __name__ == "__main__":
print("--- KNN算法详细过程演示 (电影评级预测) ---")
train_features = [
[95, 5],
[88, 2],
[110, 15],
[150, 200],
[105, 8],
[140, 150],
[98, 10],
[160, 250],
]
train_labels = ['普通', '普通', '大片', '大片', '普通', '大片', '普通', '大片']
test_movie = [120, 50]
k = 3
print(f"训练集数据 ({len(train_features)} 部):")
for i, (data, label) in enumerate(zip(train_features, train_labels)):
print(f" 电影{i+1}: [时长={data[0]}min, 预算={data[1]}M$] -> 评级: {label}")
print(f"\n待预测电影: [时长={test_movie[0]}min, 预算={test_movie[1]}M$]")
print(f"设定 k={k}")
prediction, detailed_info = knn_predict(train_features, train_labels, test_movie, k=k)
print("\n--- 详细分析过程 ---")
print("1. 计算待预测电影与所有已知电影的距离:")
for i, (dist, point, label) in enumerate(detailed_info):
print(f" - 与电影{i+1} ([{point[0]}min, {point[1]}M$], '{label}') 的距离 = {dist:.4f}")
print(f"\n2. 距离最近的 {k} 个邻居是:")
k_nearest = detailed_info[:k]
for i, (dist, point, label) in enumerate(k_nearest):
print(f" - 邻居{i+1}: [{point[0]}min, {point[1]}M$], 评级='{label}', 距离={dist:.4f}")
neighbor_labels_for_vote = [label for _, _, label in k_nearest]
print(f"\n3. 这 {k} 个邻居的评级分别是: {neighbor_labels_for_vote}")
vote_counts = Counter(neighbor_labels_for_vote)
print(f"4. 投票统计: {dict(vote_counts)}")
print("\n--- 最终预测结果 ---")
print(f"待预测电影 (时长{test_movie[0]}分钟, 预算{test_movie[1]}M$) 的预测评级是: '{prediction}'")
|
2301_80822435/machine-learning-course
|
assignment4/2班40.py
|
Python
|
mit
| 2,687
|
import math
from collections import Counter
def euclidean_distance(x1, x2):  # Euclidean distance
    if len(x1) != len(x2):
        raise ValueError("Both samples must have the same number of features")
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(x1, x2)))
def knn_predict(x, train_data, train_labels, k):
    # Validate the inputs
    if len(train_data) != len(train_labels):
        raise ValueError("Number of training samples and labels must match")
    if k <= 0 or k > len(train_data):
        raise ValueError("k must be positive and no larger than the training set size")
    # Distance from the query sample to every training sample
    distances = []
    for i in range(len(train_data)):
        dist = euclidean_distance(x, train_data[i])
        distances.append((dist, train_labels[i]))
    # Sort by ascending distance and keep the k nearest neighbors
    distances.sort(key=lambda x: x[0])
    k_nearest_labels = [label for (dist, label) in distances[:k]]
    # Majority vote decides the prediction
    most_common = Counter(k_nearest_labels).most_common(1)
    return most_common[0][0]
def knn_classify(test_data, train_data, train_labels, k):
    predictions = []
    for x in test_data:
        pred = knn_predict(x, train_data, train_labels, k)
        predictions.append(pred)
    return predictions
if __name__ == "__main__":
    # Training data
    train_data = [
        [1.2, 3.1], [1.9, 2.8], [2.3, 3.5],    # class 0
        [5.4, 6.2], [6.1, 5.8], [5.9, 6.5],    # class 1
        [8.3, 2.1], [7.9, 1.8], [8.5, 2.5]     # class 2
    ]
    # Training labels
    train_labels = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    # Test data
    test_data = [
        [2.0, 3.0],   # expected class 0
        [5.8, 6.0],   # expected class 1
        [8.0, 2.0]    # expected class 2
    ]
    # Predictions with k=3
    k = 3
    predictions = knn_classify(test_data, train_data, train_labels, k)
    # Print the results
    print(f"Predictions with k={k}:")
    for i, (test_sample, pred) in enumerate(zip(test_data, predictions)):
        print(f"Sample {test_sample} predicted class: {pred}")
|
2301_80822435/machine-learning-course
|
assignment4/2班41.py
|
Python
|
mit
| 2,042
|
import math
import heapq
from collections import Counter
def knn_predict(
x_test,
x_train,
y_train,
k=3,
distance_type='euclidean',
task='classification'
):
"""
k近邻算法预测函数
:param x_test: 单个测试样本(列表/元组,如[x1, x2, ...])
:param x_train: 训练集特征(列表嵌套列表,如[[x1,y1], [x2,y2], ...])
:param y_train: 训练集标签(列表,分类任务为离散值,回归任务为连续值)
:param k: 近邻数量(正整数,默认3)
:param distance_type: 距离度量类型(默认'euclidean'欧氏距离,可选'manhattan'曼哈顿距离)
:param task: 任务类型(默认'classification'分类,可选'regression'回归)
:return: 预测结果(分类为类别标签,回归为预测数值)
"""
# 输入合法性检查
if not x_train or not y_train:
raise ValueError("训练集特征和标签不能为空")
if len(x_train) != len(y_train):
raise ValueError("训练集特征和标签数量必须一致")
if k <= 0 or k > len(x_train):
raise ValueError(f"k必须满足 0 < k ≤ 训练集数量(当前训练集数量:{len(x_train)})")
sample_dim = len(x_train[0])
if len(x_test) != sample_dim:
raise ValueError(f"测试样本维度({len(x_test)})与训练样本维度({sample_dim})不一致")
if distance_type not in ['euclidean', 'manhattan']:
raise ValueError("distance_type仅支持'euclidean'(欧氏距离)和'manhattan'(曼哈顿距离)")
if task not in ['classification', 'regression']:
raise ValueError("task仅支持'classification'(分类)和'regression'(回归)")
# 1. 计算测试样本与所有训练样本的距离
distances = []
for i, x_train_sample in enumerate(x_train):
if distance_type == 'euclidean':
# 欧氏距离:d = sqrt(sum((x_test_i - x_train_i)^2))
dist = math.sqrt(sum((test_dim - train_dim)**2 for test_dim, train_dim in zip(x_test, x_train_sample)))
else: # manhattan
# 曼哈顿距离:d = sum(|x_test_i - x_train_i|)
dist = sum(abs(test_dim - train_dim) for test_dim, train_dim in zip(x_test, x_train_sample))
# 存储(距离,训练样本索引),方便后续取标签
distances.append((dist, i))
# 2. 找到距离最近的k个样本(使用小顶堆高效获取Top-K,时间复杂度O(n log k))
# heapq.nsmallest返回前k个最小元素,按距离升序排列
k_nearest = heapq.nsmallest(k, distances, key=lambda x: x[0])
# 3. 提取k个近邻的标签
k_nearest_labels = [y_train[i] for (dist, i) in k_nearest]
# 4. 根据任务类型计算预测结果
if task == 'classification':
# 分类任务:投票法(取出现次数最多的标签)
counter = Counter(k_nearest_labels)
# most_common(1)返回[(标签, 次数)],取第一个元素的标签
y_pred = counter.most_common(1)[0][0]
else: # regression
# 回归任务:平均值法(取k个近邻标签的均值)
y_pred = sum(k_nearest_labels) / len(k_nearest_labels)
return y_pred
def knn_batch_predict(
x_test_batch,
x_train,
y_train,
k=3,
distance_type='euclidean',
task='classification'
):
"""
批量预测函数(对多个测试样本进行预测)
:param x_test_batch: 测试集(列表嵌套列表,如[[x1,y1], [x2,y2], ...])
:return: 批量预测结果(列表)
"""
return [
knn_predict(x_test, x_train, y_train, k, distance_type, task)
for x_test in x_test_batch
]
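# --- Added usage sketch (not part of the original file) ---
# Hypothetical smoke test for knn_predict / knn_batch_predict; data is illustrative.
if __name__ == "__main__":
    x_train = [[1, 1], [1, 2], [6, 6], [7, 6]]
    y_cls = ['A', 'A', 'B', 'B']
    print(knn_predict([2, 1], x_train, y_cls, k=3))                      # -> 'A'
    print(knn_batch_predict([[2, 1], [6, 7]], x_train, y_cls, k=3))     # -> ['A', 'B']
    y_reg = [1.0, 2.0, 6.0, 7.0]
    print(knn_predict([6, 7], x_train, y_reg, k=2, task='regression'))  # -> 6.5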
|
2301_80822435/machine-learning-course
|
assignment4/2班42.py
|
Python
|
mit
| 3,718
|
import math
import random
from collections import Counter
random.seed(42)
def calc_euclidean_dist(s1, s2):
    # Validate sample format and feature types
    for s, name in [(s1, "sample 1"), (s2, "sample 2")]:
        if not isinstance(s, list):
            raise TypeError(f"{name} must be a list")
        if not all(isinstance(val, (int, float)) for val in s):
            raise TypeError(f"{name} feature values must be ints/floats")
    # Check dimension consistency
    if len(s1) != len(s2):
        raise ValueError(f"Dimension mismatch: sample 1 ({len(s1)}-dim) != sample 2 ({len(s2)}-dim)")
    # Euclidean distance
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(s1, s2)))
def knn_predict(X_train, y_train, x_test, k=5, task="classification"):
    # Core input validation (consolidated; covers the key checks)
    if not (isinstance(X_train, list) and isinstance(y_train, list) and X_train and y_train):
        raise ValueError("X_train/y_train must be non-empty, aligned lists")
    if len(X_train) != len(y_train):
        raise ValueError(f"X_train ({len(X_train)} samples) and y_train ({len(y_train)} labels) do not match")
    if not (isinstance(k, int) and 0 < k <= len(X_train)):
        raise ValueError(f"k must be an integer with 0 < k <= {len(X_train)}")
    if task not in ["classification", "regression"]:
        raise ValueError("task only supports 'classification' and 'regression'")
    if task == "regression" and not all(isinstance(lab, (int, float)) for lab in y_train):
        raise TypeError("For regression, y_train labels must be ints/floats")
    calc_euclidean_dist(X_train[0], x_test)  # validate the test sample's dimension
    # Compute distances and select the top-k neighbors
    dist_labels = [(calc_euclidean_dist(x, x_test), lab) for x, lab in zip(X_train, y_train)]
    dist_labels.sort(key=lambda x: (x[0], random.random()))  # sort by distance, ties broken at random
    top_k_labels = [lab for _, lab in dist_labels[:k]]
    # Produce the result for the given task
    if task == "classification":
        cnt = Counter(top_k_labels)
        max_cnt = max(cnt.values())
        return random.choice([lab for lab, c in cnt.items() if c == max_cnt])
    else:
        return round(sum(top_k_labels) / k, 4)
def knn_evaluate(X_train, y_train, X_test, y_test, k=5, task="classification"):
    # Basic validation of the test set
    if not (isinstance(X_test, list) and isinstance(y_test, list) and X_test and y_test):
        raise ValueError("X_test/y_test must be non-empty, aligned lists")
    if len(X_test) != len(y_test):
        raise ValueError(f"X_test ({len(X_test)} samples) and y_test ({len(y_test)} labels) do not match")
    if task == "regression" and not all(isinstance(lab, (int, float)) for lab in y_test):
        raise TypeError("For regression, y_test labels must be ints/floats")
    calc_euclidean_dist(X_train[0], X_test[0])  # validate dimension consistency
    # Batch-predict and compute evaluation metrics
    y_pred = [knn_predict(X_train, y_train, x, k, task) for x in X_test]
    if task == "classification":
        correct = sum(1 for t, p in zip(y_test, y_pred) if t == p)
        acc = correct / len(y_test)
        return {"accuracy": round(acc, 4), "correct/total": f"{correct}/{len(y_test)}"}
    else:
        mse = sum((t - p) ** 2 for t, p in zip(y_test, y_pred)) / len(y_test)
        return {"MSE": round(mse, 4), "n_test_samples": len(y_test)}
if __name__ == "__main__":
    # Iris classification sanity check
    print("[Iris species prediction] features: [petal length, petal width] | labels: 0=setosa, 1=versicolor, 2=virginica")
    # Dataset
    X_train = [[1.4, 0.2], [1.3, 0.2], [1.5, 0.2], [1.4, 0.3],
               [4.7, 1.4], [4.5, 1.5], [4.9, 1.5], [4.7, 1.6],
               [6.7, 2.2], [6.3, 1.8], [6.5, 2.0], [6.2, 2.3]]
    y_train = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
    X_test = [[1.6, 0.2], [4.8, 1.5], [6.4, 2.1], [1.3, 0.3], [5.0, 1.7]]
    y_test = [0, 1, 2, 0, 1]
    # Single-sample prediction
    single_pred = knn_predict(X_train, y_train, [4.6, 1.4], k=3)
    print(f"Predicted label for sample [4.6, 1.4]: {single_pred}")
    # Test-set evaluation
    eval_res = knn_evaluate(X_train, y_train, X_test, y_test, k=3)
    print(f"Classification evaluation: accuracy={eval_res['accuracy']} ({eval_res['correct/total']})")
|
2301_80822435/machine-learning-course
|
assignment4/2班43.py
|
Python
|
mit
| 4,283
|
import numpy as np
from collections import Counter
class KNN:
def __init__(self, k=3):
self.k = k
def fit(self, X, y):
self.X_train = X
self.y_train = y
def predict(self, X):
return np.array([self._predict_one(x) for x in X])
def _predict_one(self, x):
        # Euclidean distance from this sample to every training sample
        distances = np.linalg.norm(self.X_train - x, axis=1)
        # Indices of the k nearest neighbors
        k_idx = np.argsort(distances)[:self.k]
        # Labels of those neighbors
        k_neighbor_labels = self.y_train[k_idx]
        # Majority vote
        label = Counter(k_neighbor_labels).most_common(1)[0][0]
return label
if __name__ == "__main__":
    # Build two classes of simple 2-D data
    X_train = np.array([
        [1, 2], [2, 3], [3, 1],   # class 0
        [6, 7], [7, 8], [8, 6]    # class 1
])
y_train = np.array([0, 0, 0, 1, 1, 1])
knn = KNN(k=3)
knn.fit(X_train, y_train)
X_test = np.array([
[2, 2],
[7, 7]
])
preds = knn.predict(X_test)
print("预测结果:", preds)
|
2301_80822435/machine-learning-course
|
assignment4/2班45号.py
|
Python
|
mit
| 1,138
|