Preparation
- Official documentation: https://www.isc.org/kea/
- Versions installed:
- kea - the DHCP server: v1.9.10
- stork - the dashboard: v0.19.0
- Environment: Debian 11 x64
- Project repositories:
- Goals:
- 1. Use Kea to provide DHCP service
- 2. Use Stork as a web dashboard (view only; it cannot make changes)
- Dependencies:
Installed with the package manager:
- gcc
- g++
- curl
- make
- autoconf
- libtool
libmysql++-dev
- libboost-system-dev
Built and installed from source:
log4cplus
Installing Kea
Note: every step in this installation is performed as the root user.
- 1. Initialize the environment
- Install the dependencies
apt install gcc g++ curl make libtool autoconf libmysql++-dev libboost-system-dev
- Install log4cplus
The GitHub release tarball does not include the ThreadPool submodule, so the ThreadPool.h from the appendix has to be dropped in by hand; it goes into the threadpool/ subdirectory of the extracted source tree:
mkdir -p ~/src/log4cplus
cd ~/src/log4cplus
wget https://github.com/log4cplus/log4cplus/archive/refs/tags/REL_2_0_7.tar.gz
tar zxvf REL_2_0_7.tar.gz
cd log4cplus-REL_2_0_7/
cp path-to/ThreadPool.h threadpool/
./configure
make
make install
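log4cplus installs into /usr/local/lib by default, so it helps to refresh the dynamic linker cache before building Kea against it (a small extra step, assuming the default install prefix):
ldconfig
ldconfig -p | grep log4cplus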
- 2. Install and configure the database (mariadb-server is used here)
apt install mariadb-server
mysql_secure_installation
vim /etc/mysql/mariadb.conf.d/50-server.cnf
mysql -u root
MariaDB [(none)]> use mysql;
MariaDB [mysql]> select host,user from user;
+-----------+-------------+
| Host      | User        |
+-----------+-------------+
| localhost | mariadb.sys |
| localhost | mysql       |
| localhost | root        |
+-----------+-------------+
3 rows in set (0.002 sec)
MariaDB [mysql]> CREATE DATABASE w21DHCP;
MariaDB [mysql]> CREATE USER 'w21dhcp'@'localhost' IDENTIFIED BY 'w21@dhcp';
MariaDB [mysql]> flush privileges;
MariaDB [mysql]> GRANT ALL ON w21DHCP.* to 'w21dhcp'@'localhost' IDENTIFIED BY 'w21@dhcp';
MariaDB [mysql]> flush privileges;
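Before continuing, it is worth confirming that the new account can actually reach the database (a quick sanity check using the credentials just created):
mysql -u w21dhcp -p'w21@dhcp' w21DHCP -e 'SHOW TABLES;'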
- 3. Build and install Kea
mkdir -p ~/src/kea
cd ~/src/kea
wget https://github.com/isc-projects/kea/archive/refs/tags/Kea-1.9.10.tar.gz
tar zxvf Kea-1.9.10.tar.gz
cd kea-Kea-1.9.10/
autoreconf --install
./configure --with-mysql
make
make install
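Kea's libraries also land in /usr/local/lib; refreshing the linker cache and printing the version is a quick way to confirm the install (kea-dhcp4 -v only prints the version and exits):
ldconfig
kea-dhcp4 -v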
- 4. Initialize the Kea database
Two initialization methods are available; use either one.
Method 1 - with kea-admin:
kea-admin db-init mysql -u w21dhcp -p w21@dhcp -n w21DHCP
Method 2 - from the mysql client, sourcing the schema script shipped with Kea:
mysql> CONNECT w21DHCP;
mysql> SOURCE path-to-kea/share/kea/scripts/mysql/dhcpdb_create.mysql
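Whichever method is used, kea-admin can report the resulting schema version as a sanity check (same credentials as the db-init call above):
kea-admin db-version mysql -u w21dhcp -p w21@dhcp -n w21DHCP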
Installing the Stork dashboard
Reference: Installing the Stork Server
- 1. Install Stork
Ubuntu/Debian
curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.deb.sh' | sudo bash
sudo apt install isc-stork-server
CentOS/RHEL/Fedora
curl -1sLf 'https://dl.cloudsmith.io/public/isc/stork/cfg/setup/bash.rpm.sh' | sudo bash
sudo dnf install isc-stork-server
- 2. Install PostgreSQL and create the database
apt-get install postgresql
su - postgres
createdb storkdb
psql
postgres=# CREATE USER stork WITH PASSWORD 'stork-password';
postgres=# GRANT ALL PRIVILEGES ON DATABASE storkdb TO stork;
- 3. Configure the database connection
Edit the configuration file /etc/stork/server.env:
STORK_DATABASE_HOST - address of the PostgreSQL database; defaults to localhost
STORK_DATABASE_PORT - port of the PostgreSQL database; defaults to 5432
STORK_DATABASE_NAME - name of the database; defaults to stork
STORK_DATABASE_USER_NAME - user name used to connect to the database; defaults to stork
STORK_DATABASE_PASSWORD - password of the user connecting to the database
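Putting the values from step 2 together, /etc/stork/server.env would look roughly like this (an illustrative sketch; the user name and password must match whatever was created in psql):
STORK_DATABASE_HOST=localhost
STORK_DATABASE_PORT=5432
STORK_DATABASE_NAME=storkdb
STORK_DATABASE_USER_NAME=stork
STORK_DATABASE_PASSWORD=stork-password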
- 4. Start Stork
systemctl start isc-stork-server
systemctl enable isc-stork-server
Open http://{IP}:8080 in a browser. Account: admin, password: admin.
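If the page does not load, check the service state and the listening port first (assuming the default port 8080):
systemctl status isc-stork-server
ss -tlnp | grep 8080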
Appendix
ThreadPool.h
#ifndef THREAD_POOL_H_7ea1ee6b_4f17_4c09_b76b_3d44e102400c
#define THREAD_POOL_H_7ea1ee6b_4f17_4c09_b76b_3d44e102400c
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <atomic>
#include <functional>
#include <stdexcept>
#include <algorithm>
#include <cassert>
namespace progschj {
class ThreadPool {
public:
explicit ThreadPool(std::size_t threads
= (std::max)(2u, std::thread::hardware_concurrency()));
template<class F, class... Args>
auto enqueue(F&& f, Args&&... args)
-> std::future<
#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703
typename std::invoke_result<F&&, Args&&...>::type
#else
typename std::result_of<F&& (Args&&...)>::type
#endif
>;
void wait_until_empty();
void wait_until_nothing_in_flight();
void set_queue_size_limit(std::size_t limit);
void set_pool_size(std::size_t limit);
~ThreadPool();
private:
void start_worker(std::size_t worker_number,
std::unique_lock<std::mutex> const &lock);
std::vector< std::thread > workers;
std::size_t pool_size;
std::queue< std::function<void()> > tasks;
std::size_t max_queue_size = 100000;
bool stop = false;
std::mutex queue_mutex;
std::condition_variable condition_producers;
std::condition_variable condition_consumers;
std::mutex in_flight_mutex;
std::condition_variable in_flight_condition;
std::atomic<std::size_t> in_flight;
struct handle_in_flight_decrement
{
ThreadPool & tp;
handle_in_flight_decrement(ThreadPool & tp_)
: tp(tp_)
{ }
~handle_in_flight_decrement()
{
std::size_t prev
= std::atomic_fetch_sub_explicit(&tp.in_flight,
std::size_t(1),
std::memory_order_acq_rel);
if (prev == 1)
{
std::unique_lock<std::mutex> guard(tp.in_flight_mutex);
tp.in_flight_condition.notify_all();
}
}
};
};
inline ThreadPool::ThreadPool(std::size_t threads)
: pool_size(threads)
, in_flight(0)
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
for (std::size_t i = 0; i != threads; ++i)
start_worker(i, lock);
}
template<class F, class... Args>
auto ThreadPool::enqueue(F&& f, Args&&... args)
-> std::future<
#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703
typename std::invoke_result<F&&, Args&&...>::type
#else
typename std::result_of<F&& (Args&&...)>::type
#endif
>
{
#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703
using return_type = typename std::invoke_result<F&&, Args&&...>::type;
#else
using return_type = typename std::result_of<F&& (Args&&...)>::type;
#endif
auto task = std::make_shared< std::packaged_task<return_type()> >(
std::bind(std::forward<F>(f), std::forward<Args>(args)...)
);
std::future<return_type> res = task->get_future();
std::unique_lock<std::mutex> lock(queue_mutex);
if (tasks.size () >= max_queue_size)
condition_producers.wait(lock,
[this]
{
return tasks.size () < max_queue_size
|| stop;
});
if (stop)
throw std::runtime_error("enqueue on stopped ThreadPool");
tasks.emplace([task](){ (*task)(); });
std::atomic_fetch_add_explicit(&in_flight,
std::size_t(1),
std::memory_order_relaxed);
condition_consumers.notify_one();
return res;
}
inline ThreadPool::~ThreadPool()
{
std::unique_lock<std::mutex> lock(queue_mutex);
stop = true;
pool_size = 0;
condition_consumers.notify_all();
condition_producers.notify_all();
condition_consumers.wait(lock, [this]{ return this->workers.empty(); });
assert(in_flight == 0);
}
inline void ThreadPool::wait_until_empty()
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->condition_producers.wait(lock,
[this]{ return this->tasks.empty(); });
}
inline void ThreadPool::wait_until_nothing_in_flight()
{
std::unique_lock<std::mutex> lock(this->in_flight_mutex);
this->in_flight_condition.wait(lock,
[this]{ return this->in_flight == 0; });
}
inline void ThreadPool::set_queue_size_limit(std::size_t limit)
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
if (stop)
return;
std::size_t const old_limit = max_queue_size;
max_queue_size = (std::max)(limit, std::size_t(1));
if (old_limit < max_queue_size)
condition_producers.notify_all();
}
inline void ThreadPool::set_pool_size(std::size_t limit)
{
if (limit < 1)
limit = 1;
std::unique_lock<std::mutex> lock(this->queue_mutex);
if (stop)
return;
std::size_t const old_size = pool_size;
assert(this->workers.size() >= old_size);
pool_size = limit;
if (pool_size > old_size)
{
for (std::size_t i = old_size; i != pool_size; ++i)
start_worker(i, lock);
}
else if (pool_size < old_size)
this->condition_consumers.notify_all();
}
inline void ThreadPool::start_worker(
std::size_t worker_number, std::unique_lock<std::mutex> const &lock)
{
assert(lock.owns_lock() && lock.mutex() == &this->queue_mutex);
assert(worker_number <= this->workers.size());
auto worker_func =
[this, worker_number]
{
for(;;)
{
std::function<void()> task;
bool notify;
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
this->condition_consumers.wait(lock,
[this, worker_number]{
return this->stop || !this->tasks.empty()
|| pool_size < worker_number + 1; });
if ((this->stop && this->tasks.empty())
|| (!this->stop && pool_size < worker_number + 1))
{
this->workers[worker_number].detach();
while (this->workers.size() > pool_size
&& !this->workers.back().joinable())
this->workers.pop_back();
if (this->workers.empty())
this->condition_consumers.notify_all();
return;
}
else if (!this->tasks.empty())
{
task = std::move(this->tasks.front());
this->tasks.pop();
notify = this->tasks.size() + 1 == max_queue_size
|| this->tasks.empty();
}
else
continue;
}
handle_in_flight_decrement guard(*this);
if (notify)
{
std::unique_lock<std::mutex> lock(this->queue_mutex);
condition_producers.notify_all();
}
task();
}
};
if (worker_number < this->workers.size()) {
std::thread & worker = this->workers[worker_number];
if (!worker.joinable()) {
worker = std::thread(worker_func);
}
} else
this->workers.push_back(std::thread(worker_func));
}
}
#endif
/usr/local/etc/kea/kea-dhcp4.conf
{
"Dhcp4": {
"interfaces-config": {
"interfaces": [
"ens18/172.16.0.2",
"ens19/172.16.16.2",
"ens20/172.16.32.2",
"ens21/192.168.101.2"
]
},
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea4-ctrl-socket"
},
"lease-database": {
"type": "memfile",
"lfc-interval": 3600
},
"expired-leases-processing": {
"reclaim-timer-wait-time": 10,
"flush-reclaimed-timer-wait-time": 25,
"hold-reclaimed-time": 3600,
"max-reclaim-leases": 100,
"max-reclaim-time": 250,
"unwarned-reclaim-cycles": 5
},
"renew-timer": 900,
"rebind-timer": 1800,
"valid-lifetime": 3600,
"option-data": [
{
"name": "domain-name-servers",
"data": "114.114.114.114, 180.76.76.76"
}
],
"client-classes": [
{
"name": "voip",
"test": "substring(option[60].hex,0,6) == 'Aastra'",
"next-server": "192.0.2.254",
"server-hostname": "hal9000",
"boot-file-name": "/dev/null"
}
],
"subnet4": [
{
"subnet": "172.16.0.0/20",
"pools": [ { "pool": "172.16.13.20 - 172.16.15.254" } ],
"option-data": [
{
"name": "routers",
"data": "172.16.0.1"
}
],
"reservations": [
{
"hw-address": "76:A4:3B:55:00:7B",
"ip-address": "172.16.0.10"
}
]
},
{
"subnet": "172.16.16.0/20",
"pools": [ { "pool": "172.16.20.30 - 172.16.31.254" } ],
"option-data": [
{
"name": "routers",
"data": "172.16.16.1"
}
],
"reservations": [
{
"hw-address": "DE:F2:A8:0A:EC:7D",
"ip-address": "172.16.16.10"
}
]
},
{
"subnet": "172.16.32.0/20",
"pools": [ { "pool": "172.16.32.20 - 172.16.47.254" } ],
"option-data": [
{
"name": "routers",
"data": "172.16.32.1"
}
]
},
{
"subnet": "192.168.101.2/24",
"pools": [ { "pool": "192.168.101.30 - 192.168.101.254" } ],
"option-data": [
{
"name": "routers",
"data": "192.168.101.1"
}
]
}
],
"loggers": [
{
"name": "kea-dhcp4",
"output_options": [
{
"output": "/usr/local/var/log/kea-dhcp4.log"
}
],
"severity": "INFO",
"debuglevel": 0
}
]
}
}
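Kea can sanity-check this file without starting the daemon; -t only parses the configuration. keactrl (installed alongside Kea) then starts the server proper:
kea-dhcp4 -t /usr/local/etc/kea/kea-dhcp4.conf
keactrl start -s dhcp4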
/etc/network/interfaces
source /etc/network/interfaces.d/*
auto lo
iface lo inet loopback
allow-hotplug ens19
iface ens19 inet dhcp
auto ens18
iface ens18 inet static
address 172.16.0.2/20
gateway 172.16.0.1
auto ens20
iface ens20 inet static
address 172.16.16.2/20
gateway 172.16.16.1
auto ens21
iface ens21 inet static
address 172.16.32.2/20
gateway 172.16.32.1
auto ens22
iface ens22 inet static
address 192.168.101.2/24
gateway 192.168.101.1
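After editing the file, restart networking so the static addresses are up before Kea tries to bind to them (Debian 11 with ifupdown):
systemctl restart networking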