mirror of
https://github.com/L-yang-yang/cugenopt.git
synced 2026-04-27 12:36:22 +02:00
Initial commit: cuGenOpt GPU optimization solver
This commit is contained in:
commit
fc5a0ff4af
117 changed files with 25545 additions and 0 deletions
413
benchmark/experiments/e2.1_custom_routing/gpu.cu
Normal file
413
benchmark/experiments/e2.1_custom_routing/gpu.cu
Normal file
|
|
@ -0,0 +1,413 @@
|
|||
/**
|
||||
* E2.1: 自定义路径规划 — OR-Tools Routing 无法支持的场景
|
||||
*
|
||||
* 场景 A:带优先级约束的 VRP (Priority-Constrained VRP)
|
||||
* - 约束扩展:penalty 中加入优先级偏序约束
|
||||
* - OR-Tools 的 Dimension 机制无法表达路径内偏序
|
||||
*
|
||||
* 场景 B:非线性运输成本 VRP (Nonlinear-Cost VRP)
|
||||
* - 目标扩展:边成本随累积负载非线性增长 cost = dist * (1 + 0.3 * load_ratio²)
|
||||
* - OR-Tools 的 ArcCostEvaluator 只接受 (from, to),无法访问累积负载
|
||||
*
|
||||
* 实例:基于 A-n32-k5
|
||||
* 时间预算:1s, 10s, 60s
|
||||
* 输出:CSV (instance,config,seed,obj,penalty,time_ms,gap_pct,generations,stop_reason)
|
||||
*/
|
||||
#include "bench_common.cuh"
|
||||
|
||||
// ============================================================
// PriorityVRPProblem: VRP extended with a within-route priority
// precedence constraint, expressed as a penalty term.
// (OR-Tools' Dimension mechanism cannot express a partial order
// inside a route, which is why this is a custom problem.)
// ============================================================
struct PriorityVRPProblem : ProblemBase<PriorityVRPProblem, 8, 64> {
    const float* d_dist;      // device (n+1)x(n+1) distance matrix, row-major; node 0 is the depot
    const float* d_demand;    // device per-customer demand, length n (customers are 0-based)
    const int* d_priority;    // 0=low, 1=medium, 2=high
    const float* h_dist;      // host copy of the distance matrix (used by init_relation_matrix)
    int n;                    // number of customers (depot excluded)
    int stride;               // row stride of the distance matrix, = n + 1
    float capacity;           // vehicle capacity shared by all vehicles
    int num_vehicles;         // number of rows (routes) in a solution
    int max_vehicles;         // soft cap on non-empty routes; exceeding it is penalized
    GpuCache cache;

    // Length of one route: depot -> customers -> depot.
    // route[] holds 0-based customer ids; +1 maps them to matrix node indices.
    __device__ float compute_route_dist(const int* route, int size) const {
        if (size == 0) return 0.0f;
        float dist = 0.0f;
        int prev = 0;
        for (int j = 0; j < size; j++) {
            int node = route[j] + 1;
            dist += d_dist[prev * stride + node];
            prev = node;
        }
        dist += d_dist[prev * stride + 0];
        return dist;
    }

    // Total travelled distance across all routes of a solution.
    __device__ float calc_total_distance(const Sol& sol) const {
        float total = 0.0f;
        for (int r = 0; r < num_vehicles; r++)
            total += compute_route_dist(sol.data[r], sol.dim2_sizes[r]);
        return total;
    }

    // Single minimization objective: total distance.
    static constexpr ObjDef OBJ_DEFS[] = {
        {ObjDir::Minimize, 1.0f, 0.0f},
    };
    __device__ float compute_obj(int idx, const Sol& sol) const {
        // idx is unused: there is only one objective.
        return calc_total_distance(sol);
    }

    // Penalty = capacity overload + priority-precedence violations
    // + using more than max_vehicles non-empty routes.
    __device__ float compute_penalty(const Sol& sol) const {
        float pen = 0.0f;
        int active = 0;
        for (int r = 0; r < num_vehicles; r++) {
            int size = sol.dim2_sizes[r];
            if (size == 0) continue;
            active++;

            // Capacity constraint: overload penalized linearly.
            float load = 0.0f;
            for (int j = 0; j < size; j++)
                load += d_demand[sol.data[r][j]];
            if (load > capacity)
                pen += (load - capacity) * 100.0f;

            // Priority precedence: within a route, higher-priority customers
            // must be visited before lower-priority ones.
            int min_prio_seen = 3;  // sentinel above the highest real priority (2)
            for (int j = 0; j < size; j++) {
                int p = d_priority[sol.data[r][j]];
                if (p > min_prio_seen) {
                    // Current customer outranks the lowest priority already
                    // visited on this route -> precedence violation.
                    pen += (float)(p - min_prio_seen) * 50.0f;
                }
                if (p < min_prio_seen) min_prio_seen = p;
            }
        }
        if (active > max_vehicles)
            pen += (float)(active - max_vehicles) * 1000.0f;
        return pen;
    }

    // Solver configuration: permutation encoding where the n customers are
    // partitioned across num_vehicles rows.
    ProblemConfig config() const {
        ProblemConfig cfg;
        cfg.encoding = EncodingType::Permutation;
        cfg.dim1 = num_vehicles;
        cfg.dim2_default = 0;
        fill_obj_config(cfg);
        cfg.cross_row_prob = 0.3f;
        cfg.row_mode = RowMode::Partition;
        cfg.total_elements = n;
        return cfg;
    }

    // Shared-memory staging: distance matrix + demands + priorities.
    // Returns 0 (disabled) when the working set exceeds the 48 KB limit.
    static constexpr size_t SMEM_LIMIT = 48 * 1024;
    size_t shared_mem_bytes() const {
        size_t total = (size_t)stride * stride * sizeof(float)
            + (size_t)n * sizeof(float)
            + (size_t)n * sizeof(int);
        return total <= SMEM_LIMIT ? total : 0;
    }
    size_t working_set_bytes() const {
        return (size_t)stride * stride * sizeof(float)
            + (size_t)n * sizeof(float)
            + (size_t)n * sizeof(int);
    }
    // Copy dist/demand/priority into dynamic shared memory and repoint this
    // (per-block) problem copy at the staged buffers.
    // NOTE(review): no __syncthreads() here — assumes the framework issues a
    // block barrier after load_shared before any evaluation; confirm.
    __device__ void load_shared(char* smem, int tid, int bsz) {
        float* sd = reinterpret_cast<float*>(smem);
        int dist_size = stride * stride;
        for (int i = tid; i < dist_size; i += bsz) sd[i] = d_dist[i];
        d_dist = sd;
        float* sdem = sd + dist_size;
        for (int i = tid; i < n; i += bsz) sdem[i] = d_demand[i];
        d_demand = sdem;
        int* spri = reinterpret_cast<int*>(sdem + n);
        for (int i = tid; i < n; i += bsz) spri[i] = d_priority[i];
        d_priority = spri;
    }

    // Host-side heuristic relation matrices: closer customer pairs receive
    // larger G/O weights, normalized by the maximum pairwise distance.
    void init_relation_matrix(float* G, float* O, int N) const {
        if (!h_dist || N != n) return;
        float max_d = 0.0f;
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++) {
                float d = h_dist[(i + 1) * stride + (j + 1)];
                if (d > max_d) max_d = d;
            }
        if (max_d <= 0.0f) return;
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++) {
                if (i == j) continue;
                float d = h_dist[(i + 1) * stride + (j + 1)];
                float proximity = 1.0f - d / max_d;
                G[i * N + j] = proximity * 0.3f;
                O[i * N + j] = proximity * 0.1f;
            }
    }

    // Build a problem instance: uploads dist/demand/priority to the device.
    // The caller owns the device buffers and must release them via destroy().
    static PriorityVRPProblem create(const float* h_dist_ptr, const float* h_demand,
                                     const int* h_priority, int n, float capacity,
                                     int num_vehicles, int max_vehicles) {
        PriorityVRPProblem prob;
        prob.n = n;
        prob.stride = n + 1;
        prob.capacity = capacity;
        prob.num_vehicles = num_vehicles;
        prob.max_vehicles = max_vehicles;
        prob.cache = GpuCache::disabled();
        prob.h_dist = h_dist_ptr;

        int n_nodes = n + 1;
        float* dd;
        CUDA_CHECK(cudaMalloc(&dd, sizeof(float) * n_nodes * n_nodes));
        CUDA_CHECK(cudaMemcpy(dd, h_dist_ptr, sizeof(float) * n_nodes * n_nodes, cudaMemcpyHostToDevice));
        prob.d_dist = dd;

        float* ddem;
        CUDA_CHECK(cudaMalloc(&ddem, sizeof(float) * n));
        CUDA_CHECK(cudaMemcpy(ddem, h_demand, sizeof(float) * n, cudaMemcpyHostToDevice));
        prob.d_demand = ddem;

        int* dpri;
        CUDA_CHECK(cudaMalloc(&dpri, sizeof(int) * n));
        CUDA_CHECK(cudaMemcpy(dpri, h_priority, sizeof(int) * n, cudaMemcpyHostToDevice));
        prob.d_priority = dpri;

        return prob;
    }

    // Release device buffers; pointers are nulled so repeated calls are safe.
    void destroy() {
        if (d_dist) { cudaFree(const_cast<float*>(d_dist)); d_dist = nullptr; }
        if (d_demand) { cudaFree(const_cast<float*>(d_demand)); d_demand = nullptr; }
        if (d_priority) { cudaFree(const_cast<int*>(d_priority)); d_priority = nullptr; }
        h_dist = nullptr;
        cache.destroy();
    }
};
|
||||
|
||||
// ============================================================
// NonlinearCostVRPProblem: edge cost grows nonlinearly with the
// cumulative load on the route:
//   cost(edge) = dist(i,j) * (1.0 + 0.3 * (load/capacity)^2)
// Models the real-world effect that a heavier vehicle burns more
// fuel/energy. OR-Tools' ArcCostEvaluator only receives (from, to)
// and cannot observe the cumulative load, hence a custom problem.
// ============================================================
struct NonlinearCostVRPProblem : ProblemBase<NonlinearCostVRPProblem, 8, 64> {
    const float* d_dist;      // device (n+1)x(n+1) distance matrix; node 0 is the depot
    const float* d_demand;    // device per-customer demand, length n (0-based customers)
    const float* h_dist;      // host copy of the distance matrix (for init_relation_matrix)
    int n;                    // number of customers (depot excluded)
    int stride;               // row stride of the distance matrix, = n + 1
    float capacity;
    int num_vehicles;
    int max_vehicles;         // soft cap on non-empty routes
    GpuCache cache;

    // Nonlinear cost of one route: each edge's distance is scaled by
    // (1 + 0.3 * ratio^2), where ratio is the cumulative demand picked up
    // so far divided by the capacity. route[] holds 0-based customer ids.
    __device__ float compute_route_nonlinear_cost(const int* route, int size) const {
        if (size == 0) return 0.0f;
        float cost = 0.0f;
        float load = 0.0f;
        int prev = 0;
        for (int j = 0; j < size; j++) {
            int cust = route[j];
            int node = cust + 1;
            load += d_demand[cust];
            float ratio = load / capacity;
            float edge_dist = d_dist[prev * stride + node];
            cost += edge_dist * (1.0f + 0.3f * ratio * ratio);
            prev = node;
        }
        cost += d_dist[prev * stride + 0]; // return leg to depot (modelled as empty, factor 1.0)
        return cost;
    }

    // Total nonlinear cost across all routes of a solution.
    __device__ float calc_total_cost(const Sol& sol) const {
        float total = 0.0f;
        for (int r = 0; r < num_vehicles; r++)
            total += compute_route_nonlinear_cost(sol.data[r], sol.dim2_sizes[r]);
        return total;
    }

    // Single minimization objective: total nonlinear cost.
    static constexpr ObjDef OBJ_DEFS[] = {
        {ObjDir::Minimize, 1.0f, 0.0f},
    };
    __device__ float compute_obj(int idx, const Sol& sol) const {
        // idx is unused: there is only one objective.
        return calc_total_cost(sol);
    }

    // Penalty = capacity overload + exceeding max_vehicles non-empty routes.
    __device__ float compute_penalty(const Sol& sol) const {
        float pen = 0.0f;
        int active = 0;
        for (int r = 0; r < num_vehicles; r++) {
            int size = sol.dim2_sizes[r];
            if (size == 0) continue;
            active++;
            float load = 0.0f;
            for (int j = 0; j < size; j++)
                load += d_demand[sol.data[r][j]];
            if (load > capacity)
                pen += (load - capacity) * 100.0f;
        }
        if (active > max_vehicles)
            pen += (float)(active - max_vehicles) * 1000.0f;
        return pen;
    }

    // Solver configuration: permutation encoding, customers partitioned
    // across num_vehicles rows (same as PriorityVRPProblem).
    ProblemConfig config() const {
        ProblemConfig cfg;
        cfg.encoding = EncodingType::Permutation;
        cfg.dim1 = num_vehicles;
        cfg.dim2_default = 0;
        fill_obj_config(cfg);
        cfg.cross_row_prob = 0.3f;
        cfg.row_mode = RowMode::Partition;
        cfg.total_elements = n;
        return cfg;
    }

    // Shared-memory staging: distance matrix + demands.
    // Returns 0 (disabled) when the working set exceeds the 48 KB limit.
    static constexpr size_t SMEM_LIMIT = 48 * 1024;
    size_t shared_mem_bytes() const {
        size_t total = (size_t)stride * stride * sizeof(float)
            + (size_t)n * sizeof(float);
        return total <= SMEM_LIMIT ? total : 0;
    }
    size_t working_set_bytes() const {
        return (size_t)stride * stride * sizeof(float)
            + (size_t)n * sizeof(float);
    }
    // Copy dist/demand into dynamic shared memory and repoint this
    // (per-block) problem copy at the staged buffers.
    // NOTE(review): no __syncthreads() here — assumes the framework issues a
    // block barrier after load_shared before any evaluation; confirm.
    __device__ void load_shared(char* smem, int tid, int bsz) {
        float* sd = reinterpret_cast<float*>(smem);
        int dist_size = stride * stride;
        for (int i = tid; i < dist_size; i += bsz) sd[i] = d_dist[i];
        d_dist = sd;
        float* sdem = sd + dist_size;
        for (int i = tid; i < n; i += bsz) sdem[i] = d_demand[i];
        d_demand = sdem;
    }

    // Host-side heuristic relation matrices: closer customer pairs receive
    // larger G/O weights, normalized by the maximum pairwise distance.
    void init_relation_matrix(float* G, float* O, int N) const {
        if (!h_dist || N != n) return;
        float max_d = 0.0f;
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++) {
                float d = h_dist[(i + 1) * stride + (j + 1)];
                if (d > max_d) max_d = d;
            }
        if (max_d <= 0.0f) return;
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++) {
                if (i == j) continue;
                float d = h_dist[(i + 1) * stride + (j + 1)];
                float proximity = 1.0f - d / max_d;
                G[i * N + j] = proximity * 0.3f;
                O[i * N + j] = proximity * 0.1f;
            }
    }

    // Build a problem instance: uploads dist/demand to the device.
    // The caller owns the device buffers and must release them via destroy().
    static NonlinearCostVRPProblem create(const float* h_dist_ptr, const float* h_demand,
                                          int n, float capacity,
                                          int num_vehicles, int max_vehicles) {
        NonlinearCostVRPProblem prob;
        prob.n = n;
        prob.stride = n + 1;
        prob.capacity = capacity;
        prob.num_vehicles = num_vehicles;
        prob.max_vehicles = max_vehicles;
        prob.cache = GpuCache::disabled();
        prob.h_dist = h_dist_ptr;

        int n_nodes = n + 1;
        float* dd;
        CUDA_CHECK(cudaMalloc(&dd, sizeof(float) * n_nodes * n_nodes));
        CUDA_CHECK(cudaMemcpy(dd, h_dist_ptr, sizeof(float) * n_nodes * n_nodes, cudaMemcpyHostToDevice));
        prob.d_dist = dd;

        float* ddem;
        CUDA_CHECK(cudaMalloc(&ddem, sizeof(float) * n));
        CUDA_CHECK(cudaMemcpy(ddem, h_demand, sizeof(float) * n, cudaMemcpyHostToDevice));
        prob.d_demand = ddem;

        return prob;
    }

    // Release device buffers; pointers are nulled so repeated calls are safe.
    void destroy() {
        if (d_dist) { cudaFree(const_cast<float*>(d_dist)); d_dist = nullptr; }
        if (d_demand) { cudaFree(const_cast<float*>(d_demand)); d_demand = nullptr; }
        h_dist = nullptr;
        cache.destroy();
    }
};
|
||||
|
||||
// ============================================================
// Priority assignment for A-n32-k5 (deterministic, reproducible).
// The 31 customers are split into 3 tiers:
//   high(2) = 10 customers, medium(1) = 11, low(0) = 10.
// Rule: customers 0-9 -> high, 10-20 -> medium, 21-30 -> low.
// Must stay in sync with PRIORITIES in routing_baseline.py.
// ============================================================
static const int an32k5_priority[AN32K5_N] = {
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2,    // customers 0-9: high
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // customers 10-20: medium
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0    // customers 21-30: low
};
|
||||
|
||||
// Scenario A: priority-constrained VRP on A-n32-k5, solved at three
// time budgets. Emits one CSV row per budget via bench_run_recreate.
static void run_priority_vrp() {
    fprintf(stderr, " [e2.1] Priority-VRP A-n32-k5\n");
    float dist[AN32K5_NODES * AN32K5_NODES];
    compute_euc2d_dist(dist, an32k5_coords, AN32K5_NODES);

    const float budgets[] = {1.0f, 10.0f, 60.0f};
    for (size_t i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
        const float budget = budgets[i];
        char config_name[64];
        snprintf(config_name, sizeof(config_name), "gensolver_pvrp_%.0fs", budget);
        SolverConfig solver_cfg = make_timed_config(budget);
        // Problem factory: a fresh instance per benchmark repetition.
        auto make_problem = [&]() {
            return PriorityVRPProblem::create(dist, an32k5_demands, an32k5_priority,
                                              AN32K5_N, 100.0f, 5, 5);
        };
        // 784.0f is the known optimum of the unconstrained A-n32-k5 (gap reference).
        bench_run_recreate("A-n32-k5-prio", config_name, make_problem, solver_cfg, 784.0f);
    }
}
|
||||
|
||||
// Baseline: plain CVRP on A-n32-k5 (no priority constraint), giving the
// reference distance the constrained variants are compared against.
static void run_standard_vrp() {
    fprintf(stderr, " [e2.1] Standard-VRP A-n32-k5 (baseline)\n");
    float dist[AN32K5_NODES * AN32K5_NODES];
    compute_euc2d_dist(dist, an32k5_coords, AN32K5_NODES);

    const float budgets[] = {1.0f, 10.0f, 60.0f};
    for (size_t i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
        const float budget = budgets[i];
        char config_name[64];
        snprintf(config_name, sizeof(config_name), "gensolver_vrp_%.0fs", budget);
        SolverConfig solver_cfg = make_timed_config(budget);
        auto make_problem = [&]() {
            return VRPProblem::create(dist, an32k5_demands, AN32K5_N, 100.0f, 5, 5);
        };
        // 784.0f is the known optimum of A-n32-k5.
        bench_run_recreate("A-n32-k5-std", config_name, make_problem, solver_cfg, 784.0f);
    }
}
|
||||
|
||||
// Scenario B: load-dependent nonlinear-cost VRP on A-n32-k5.
// No known optimum exists for this objective, so 0.0f is passed and the
// gap column is reported as 0.
static void run_nonlinear_cost_vrp() {
    fprintf(stderr, " [e2.1] Nonlinear-Cost-VRP A-n32-k5\n");
    float dist[AN32K5_NODES * AN32K5_NODES];
    compute_euc2d_dist(dist, an32k5_coords, AN32K5_NODES);

    const float budgets[] = {1.0f, 10.0f, 60.0f};
    for (size_t i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
        const float budget = budgets[i];
        char config_name[64];
        snprintf(config_name, sizeof(config_name), "gensolver_nlvrp_%.0fs", budget);
        SolverConfig solver_cfg = make_timed_config(budget);
        auto make_problem = [&]() {
            return NonlinearCostVRPProblem::create(dist, an32k5_demands,
                                                   AN32K5_N, 100.0f, 5, 5);
        };
        bench_run_recreate("A-n32-k5-nlcost", config_name, make_problem, solver_cfg, 0.0f);
    }
}
|
||||
|
||||
// Entry point: prints the CSV header, then runs the three experiment
// groups in sequence (results go to stdout, progress to stderr).
int main() {
    bench_init();
    bench_csv_header();
    run_standard_vrp();        // baseline: plain CVRP
    run_priority_vrp();        // scenario A: priority precedence constraint
    run_nonlinear_cost_vrp();  // scenario B: load-dependent nonlinear cost
    fprintf(stderr, "\n[e2.1] GPU side completed.\n");
    return 0;
}
|
||||
173
benchmark/experiments/e2.1_custom_routing/routing_baseline.py
Normal file
173
benchmark/experiments/e2.1_custom_routing/routing_baseline.py
Normal file
|
|
@ -0,0 +1,173 @@
|
|||
"""
|
||||
E2.1: 自定义路径规划 — OR-Tools Routing baseline
|
||||
|
||||
OR-Tools Routing 的两个建模限制:
|
||||
A. 无法表达路径内优先级偏序约束(Dimension 只支持累积约束)
|
||||
B. 无法使用负载依赖的非线性边成本(ArcCostEvaluator 只接受 from/to)
|
||||
|
||||
因此只能求解标准 CVRP,然后事后:
|
||||
- 统计优先级违规数量
|
||||
- 用非线性公式重新计算真实成本
|
||||
|
||||
用法:python routing_baseline.py
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
from ortools.constraint_solver import routing_enums_pb2, pywrapcp
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..", "common"))
|
||||
from instances import load_vrp, euc2d_dist_matrix, VRP_INSTANCES
|
||||
|
||||
TIME_BUDGETS = [1, 10, 60]
|
||||
|
||||
# Priority assignment kept in sync with an32k5_priority in gpu.cu:
# customers 0-9: high(2), 10-20: medium(1), 21-30: low(0)
PRIORITIES = (
    [2] * 10 +    # customers 0-9: high
    [1] * 11 +    # customers 10-20: medium
    [0] * 10      # customers 21-30: low
)
|
||||
|
||||
|
||||
def count_priority_violations(routes, priorities):
    """Count priority-order violations over all routes.

    A violation is a customer whose priority is strictly higher than the
    lowest priority already visited earlier on the same route, i.e. a
    high-priority customer served after a lower-priority one.
    """
    total = 0
    for route in routes:
        lowest_so_far = 3  # sentinel above the highest real priority (2)
        for customer in route:
            prio = priorities[customer]
            if prio > lowest_so_far:
                total += 1
            elif prio < lowest_so_far:
                lowest_so_far = prio
    return total
|
||||
|
||||
|
||||
def calc_nonlinear_cost(routes, dist, demands, capacity):
    """Re-price routes with the load-dependent nonlinear edge cost.

    cost(edge) = dist(i, j) * (1.0 + 0.3 * (load/capacity)^2)

    Matches NonlinearCostVRPProblem::compute_route_nonlinear_cost in gpu.cu.
    The dist matrix includes the depot at index 0, so a 0-based customer id
    cust maps to matrix node cust + 1.
    """
    total = 0.0
    for route in routes:
        load = 0.0
        prev_node = 0  # every route starts at the depot
        for cust in route:
            node = cust + 1
            load += demands[node]
            ratio = load / capacity
            total += dist[prev_node][node] * (1.0 + 0.3 * ratio * ratio)
            prev_node = node
        # Return leg to the depot: modelled as empty, factor 1.0.
        total += dist[prev_node][0]
    return total
|
||||
|
||||
|
||||
def solve_cvrp_routing(dist, demands, n, n_vehicles, capacity, time_limit_sec):
    """Solve a standard CVRP with OR-Tools Routing (no priority constraint).

    Args:
        dist: (n x n) distance matrix including the depot at index 0.
        demands: per-node demand list including the depot (index 0 = 0).
        n: total node count (depot + customers).
        n_vehicles: fleet size.
        capacity: per-vehicle capacity.
        time_limit_sec: search time budget in seconds.

    Returns:
        (objective, elapsed_ms, routes, stop_reason) where routes holds
        0-based customer ids per vehicle; ([], "infeasible") on failure.

    NOTE(review): OR-Tools transit/demand callbacks must return int64; this
    assumes dist and demands (and capacity) are integer-valued — confirm
    against euc2d_dist_matrix / the instance loader.
    """
    manager = pywrapcp.RoutingIndexManager(n, n_vehicles, 0)
    routing = pywrapcp.RoutingModel(manager)

    def dist_callback(from_idx, to_idx):
        # Translate solver indices to node ids before the matrix lookup.
        return dist[manager.IndexToNode(from_idx)][manager.IndexToNode(to_idx)]

    transit_id = routing.RegisterTransitCallback(dist_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_id)

    def demand_callback(idx):
        return demands[manager.IndexToNode(idx)]

    # Capacity handled as a cumulative dimension (slack 0, start at zero).
    demand_id = routing.RegisterUnaryTransitCallback(demand_callback)
    routing.AddDimensionWithVehicleCapacity(
        demand_id, 0, [capacity] * n_vehicles, True, "Cap")

    params = pywrapcp.DefaultRoutingSearchParameters()
    params.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    params.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
    params.time_limit.seconds = time_limit_sec

    t0 = time.perf_counter()
    solution = routing.SolveWithParameters(params)
    elapsed_ms = (time.perf_counter() - t0) * 1000.0

    if not solution:
        return float("inf"), elapsed_ms, [], "infeasible"

    obj = solution.ObjectiveValue()
    # Extract one route per vehicle, dropping depot visits.
    routes = []
    for v in range(n_vehicles):
        route = []
        idx = routing.Start(v)
        while not routing.IsEnd(idx):
            node = manager.IndexToNode(idx)
            if node != 0:
                route.append(node - 1)  # convert to 0-based customer id
            idx = solution.Value(routing.NextVar(idx))
        routes.append(route)

    return obj, elapsed_ms, routes, "time"
|
||||
|
||||
|
||||
def print_row(instance, config, obj, elapsed_ms, optimal, violations, reason):
    """Emit one CSV result row to stdout and flush.

    Columns: instance,config,seed,obj,penalty,time_ms,gap_pct,generations,
    stop_reason; the violation count is appended to stop_reason as "_vN".
    An infinite obj marks an infeasible run (gap printed as "inf").
    """
    if obj == float("inf"):
        line = f"{instance},{config},0,inf,0.00,{elapsed_ms:.1f},inf,0,{reason}"
    else:
        gap = 0.0 if optimal <= 0 else (obj - optimal) / optimal * 100.0
        line = (f"{instance},{config},0,{obj:.2f},0.00,{elapsed_ms:.1f},"
                f"{gap:.2f},0,{reason}_v{violations}")
    print(line)
    sys.stdout.flush()
|
||||
|
||||
|
||||
def main():
    """Run the OR-Tools baseline over all VRP instances and print CSV rows.

    For each instance and time budget: solve a standard CVRP once, then
    report it three ways — with post-hoc priority-violation counting
    (scenario A), as the plain baseline, and re-priced with the nonlinear
    cost formula (scenario B).
    """
    print("instance,config,seed,obj,penalty,time_ms,gap_pct,generations,stop_reason")

    for entry in VRP_INSTANCES:
        inst = load_vrp(entry)
        n_customers = inst["n"] - 1
        print(f" [e2.1-routing] VRP {inst['name']} (n={inst['n']})",
              file=sys.stderr)
        dist = euc2d_dist_matrix(inst["coords"])
        demands_full = [0] + list(inst["demands"])  # index 0 = depot
        # NOTE(review): PRIORITIES covers 31 customers; assumes every
        # instance has n_customers <= 31 — confirm, else this slice is short.
        priorities = PRIORITIES[:n_customers]

        for t in TIME_BUDGETS:
            obj, ms, routes, reason = solve_cvrp_routing(
                dist, demands_full,
                inst["n"], inst["n_vehicles"], inst["capacity"], t)

            # routes is [] only when the solve was infeasible.
            violations = count_priority_violations(routes, priorities) if routes else -1

            # Scenario A: priority constraint (violations counted post hoc).
            print_row(
                f"{inst['name']}-prio",
                f"routing_GLS_{t}s",
                obj, ms, inst["optimal"], violations, reason)

            # Standard VRP baseline (same solution, no violation count).
            print_row(
                f"{inst['name']}-std",
                f"routing_GLS_{t}s",
                obj, ms, inst["optimal"], 0, reason)

            # Scenario B: nonlinear cost — re-price the OR-Tools solution
            # with the load-dependent formula.
            if routes:
                nl_cost = calc_nonlinear_cost(
                    routes, dist, demands_full, inst["capacity"])
                print_row(
                    f"{inst['name']}-nlcost",
                    f"routing_GLS_{t}s",
                    nl_cost, ms, 0, 0, reason)
            else:
                print_row(
                    f"{inst['name']}-nlcost",
                    f"routing_GLS_{t}s",
                    float("inf"), ms, 0, 0, reason)


if __name__ == "__main__":
    main()
|
||||
Loading…
Add table
Add a link
Reference in a new issue