krahets 2 months ago
parent 03a6cd27ca
commit 4ac4f94628

@ -1443,8 +1443,8 @@ comments: true
/* 双向链表节点类 */ /* 双向链表节点类 */
class ListNode { class ListNode {
int val; // 节点值 int val; // 节点值
ListNode next; // 指向后继节点的引用 ListNode? next; // 指向后继节点的引用
ListNode prev; // 指向前驱节点的引用 ListNode? prev; // 指向前驱节点的引用
ListNode(this.val, [this.next, this.prev]); // 构造函数 ListNode(this.val, [this.next, this.prev]); // 构造函数
} }
``` ```

@ -1721,7 +1721,7 @@ comments: true
remove(index) { remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界'); if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index]; let num = this.#arr[index];
// 将索引 index 之后的元素都向前移动一位 // 将索引 index 之后的元素都向前移动一位
for (let j = index; j < this.#size - 1; j++) { for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1]; this.#arr[j] = this.#arr[j + 1];
} }

@ -29,7 +29,7 @@ comments: true
链表由节点组成,节点之间通过引用(指针)连接,各个节点可以存储不同类型的数据,例如 `int`、`double`、`string`、`object` 等。 链表由节点组成,节点之间通过引用(指针)连接,各个节点可以存储不同类型的数据,例如 `int`、`double`、`string`、`object` 等。
相对地,数组元素则必须是相同类型的,这样才能通过计算偏移量来获取对应元素位置。例如,数组同时包含 `int` 和 `long` 两种类型,单个元素分别占用 4 字节和 8 字节,此时就不能用以下公式计算偏移量了,因为数组中包含了两种“元素长度”。
```shell ```shell
# 元素内存地址 = 数组内存地址(首元素内存地址) + 元素长度 * 元素索引 # 元素内存地址 = 数组内存地址(首元素内存地址) + 元素长度 * 元素索引

@ -532,11 +532,7 @@ comments: true
) { ) {
// 当放置完所有行时,记录解 // 当放置完所有行时,记录解
if row == n { if row == n {
let mut copy_state: Vec<Vec<String>> = Vec::new(); res.push(state.clone());
for s_row in state.clone() {
copy_state.push(s_row);
}
res.push(copy_state);
return; return;
} }
// 遍历所有列 // 遍历所有列
@ -547,12 +543,12 @@ comments: true
// 剪枝:不允许该格子所在列、主对角线、次对角线上存在皇后 // 剪枝:不允许该格子所在列、主对角线、次对角线上存在皇后
if !cols[col] && !diags1[diag1] && !diags2[diag2] { if !cols[col] && !diags1[diag1] && !diags2[diag2] {
// 尝试:将皇后放置在该格子 // 尝试:将皇后放置在该格子
state.get_mut(row).unwrap()[col] = "Q".into(); state[row][col] = "Q".into();
(cols[col], diags1[diag1], diags2[diag2]) = (true, true, true); (cols[col], diags1[diag1], diags2[diag2]) = (true, true, true);
// 放置下一行 // 放置下一行
backtrack(row + 1, n, state, res, cols, diags1, diags2); backtrack(row + 1, n, state, res, cols, diags1, diags2);
// 回退:将该格子恢复为空位 // 回退:将该格子恢复为空位
state.get_mut(row).unwrap()[col] = "#".into(); state[row][col] = "#".into();
(cols[col], diags1[diag1], diags2[diag2]) = (false, false, false); (cols[col], diags1[diag1], diags2[diag2]) = (false, false, false);
} }
} }
@ -561,14 +557,7 @@ comments: true
/* 求解 n 皇后 */ /* 求解 n 皇后 */
fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> { fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> {
// 初始化 n*n 大小的棋盘,其中 'Q' 代表皇后,'#' 代表空位 // 初始化 n*n 大小的棋盘,其中 'Q' 代表皇后,'#' 代表空位
let mut state: Vec<Vec<String>> = Vec::new(); let mut state: Vec<Vec<String>> = vec![vec!["#".to_string(); n]; n];
for _ in 0..n {
let mut row: Vec<String> = Vec::new();
for _ in 0..n {
row.push("#".into());
}
state.push(row);
}
let mut cols = vec![false; n]; // 记录列是否有皇后 let mut cols = vec![false; n]; // 记录列是否有皇后
let mut diags1 = vec![false; 2 * n - 1]; // 记录主对角线上是否有皇后 let mut diags1 = vec![false; 2 * n - 1]; // 记录主对角线上是否有皇后
let mut diags2 = vec![false; 2 * n - 1]; // 记录次对角线上是否有皇后 let mut diags2 = vec![false; 2 * n - 1]; // 记录次对角线上是否有皇后

@ -355,7 +355,7 @@ comments: true
```rust title="subset_sum_i_naive.rs" ```rust title="subset_sum_i_naive.rs"
/* 回溯算法:子集和 I */ /* 回溯算法:子集和 I */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
total: i32, total: i32,
choices: &[i32], choices: &[i32],
@ -363,7 +363,7 @@ comments: true
) { ) {
// 子集和等于 target 时,记录解 // 子集和等于 target 时,记录解
if total == target { if total == target {
res.push(state); res.push(state.clone());
return; return;
} }
// 遍历所有选择 // 遍历所有选择
@ -375,7 +375,7 @@ comments: true
// 尝试:做出选择,更新元素和 total // 尝试:做出选择,更新元素和 total
state.push(choices[i]); state.push(choices[i]);
// 进行下一轮选择 // 进行下一轮选择
backtrack(state.clone(), target, total + choices[i], choices, res); backtrack(state, target, total + choices[i], choices, res);
// 回退:撤销选择,恢复到之前的状态 // 回退:撤销选择,恢复到之前的状态
state.pop(); state.pop();
} }
@ -383,10 +383,10 @@ comments: true
/* 求解子集和 I包含重复子集 */ /* 求解子集和 I包含重复子集 */
fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集) let mut state = Vec::new(); // 状态(子集)
let total = 0; // 子集和 let total = 0; // 子集和
let mut res = Vec::new(); // 结果列表(子集列表) let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, total, nums, &mut res); backtrack(&mut state, target, total, nums, &mut res);
res res
} }
``` ```
@ -912,7 +912,7 @@ comments: true
```rust title="subset_sum_i.rs" ```rust title="subset_sum_i.rs"
/* 回溯算法:子集和 I */ /* 回溯算法:子集和 I */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
choices: &[i32], choices: &[i32],
start: usize, start: usize,
@ -920,7 +920,7 @@ comments: true
) { ) {
// 子集和等于 target 时,记录解 // 子集和等于 target 时,记录解
if target == 0 { if target == 0 {
res.push(state); res.push(state.clone());
return; return;
} }
// 遍历所有选择 // 遍历所有选择
@ -934,7 +934,7 @@ comments: true
// 尝试:做出选择,更新 target, start // 尝试:做出选择,更新 target, start
state.push(choices[i]); state.push(choices[i]);
// 进行下一轮选择 // 进行下一轮选择
backtrack(state.clone(), target - choices[i], choices, i, res); backtrack(state, target - choices[i], choices, i, res);
// 回退:撤销选择,恢复到之前的状态 // 回退:撤销选择,恢复到之前的状态
state.pop(); state.pop();
} }
@ -942,11 +942,11 @@ comments: true
/* 求解子集和 I */ /* 求解子集和 I */
fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集) let mut state = Vec::new(); // 状态(子集)
nums.sort(); // 对 nums 进行排序 nums.sort(); // 对 nums 进行排序
let start = 0; // 遍历起始点 let start = 0; // 遍历起始点
let mut res = Vec::new(); // 结果列表(子集列表) let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, nums, start, &mut res); backtrack(&mut state, target, nums, start, &mut res);
res res
} }
``` ```
@ -1512,7 +1512,7 @@ comments: true
```rust title="subset_sum_ii.rs" ```rust title="subset_sum_ii.rs"
/* 回溯算法:子集和 II */ /* 回溯算法:子集和 II */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
choices: &[i32], choices: &[i32],
start: usize, start: usize,
@ -1520,7 +1520,7 @@ comments: true
) { ) {
// 子集和等于 target 时,记录解 // 子集和等于 target 时,记录解
if target == 0 { if target == 0 {
res.push(state); res.push(state.clone());
return; return;
} }
// 遍历所有选择 // 遍历所有选择
@ -1539,7 +1539,7 @@ comments: true
// 尝试:做出选择,更新 target, start // 尝试:做出选择,更新 target, start
state.push(choices[i]); state.push(choices[i]);
// 进行下一轮选择 // 进行下一轮选择
backtrack(state.clone(), target - choices[i], choices, i, res); backtrack(state, target - choices[i], choices, i + 1, res);
// 回退:撤销选择,恢复到之前的状态 // 回退:撤销选择,恢复到之前的状态
state.pop(); state.pop();
} }
@ -1547,11 +1547,11 @@ comments: true
/* 求解子集和 II */ /* 求解子集和 II */
fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 状态(子集) let mut state = Vec::new(); // 状态(子集)
nums.sort(); // 对 nums 进行排序 nums.sort(); // 对 nums 进行排序
let start = 0; // 遍历起始点 let start = 0; // 遍历起始点
let mut res = Vec::new(); // 结果列表(子集列表) let mut res = Vec::new(); // 结果列表(子集列表)
backtrack(state, target, nums, start, &mut res); backtrack(&mut state, target, nums, start, &mut res);
res res
} }
``` ```

@ -54,7 +54,7 @@ $$
1. 初始状态下,指针 $i$ 和 $j$ 分列数组两端。 1. 初始状态下,指针 $i$ 和 $j$ 分列数组两端。
2. 计算当前状态的容量 $cap[i, j]$ ,并更新最大容量。 2. 计算当前状态的容量 $cap[i, j]$ ,并更新最大容量。
3. 比较板 $i$ 和板 $j$ 的高度,并将短板向内移动一格。
4. 循环执行第 `2.` 步和第 `3.` 步,直至 $i$ 和 $j$ 相遇时结束。 4. 循环执行第 `2.` 步和第 `3.` 步,直至 $i$ 和 $j$ 相遇时结束。
=== "<1>" === "<1>"

@ -12,7 +12,7 @@ comments: true
1. 翻开字典约一半的页数,查看该页的首字母是什么,假设首字母为 $m$ 。 1. 翻开字典约一半的页数,查看该页的首字母是什么,假设首字母为 $m$ 。
2. 由于在拼音字母表中 $r$ 位于 $m$ 之后,所以排除字典前半部分,查找范围缩小到后半部分。 2. 由于在拼音字母表中 $r$ 位于 $m$ 之后,所以排除字典前半部分,查找范围缩小到后半部分。
3. 不断重复步骤 `1.` 和步骤 `2.` ,直至找到拼音首字母为 $r$ 的页码为止。
=== "<1>" === "<1>"
![查字典步骤](algorithms_are_everywhere.assets/binary_search_dictionary_step1.png){ class="animation-figure" } ![查字典步骤](algorithms_are_everywhere.assets/binary_search_dictionary_step1.png){ class="animation-figure" }

@ -69,26 +69,19 @@ comments: true
=== "C++" === "C++"
```cpp title="quick_sort.cpp" ```cpp title="quick_sort.cpp"
/* 元素交换 */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* 哨兵划分 */ /* 哨兵划分 */
int partition(vector<int> &nums, int left, int right) { int partition(vector<int> &nums, int left, int right) {
// 以 nums[left] 为基准数 // 以 nums[left] 为基准数
int i = left, j = right; int i = left, j = right;
while (i < j) { while (i < j) {
while (i < j && nums[j] >= nums[left]) while (i < j && nums[j] >= nums[left])
j--; // 从右向左找首个小于基准数的元素 j--; // 从右向左找首个小于基准数的元素
while (i < j && nums[i] <= nums[left]) while (i < j && nums[i] <= nums[left])
i++; // 从左向右找首个大于基准数的元素 i++; // 从左向右找首个大于基准数的元素
swap(nums, i, j); // 交换这两个元素 swap(nums[i], nums[j]); // 交换这两个元素
} }
swap(nums, i, left); // 将基准数交换至两子数组的分界线 swap(nums[i], nums[left]); // 将基准数交换至两子数组的分界线
return i; // 返回基准数的索引 return i; // 返回基准数的索引
} }
``` ```
@ -721,18 +714,18 @@ comments: true
// 选取三个候选元素的中位数 // 选取三个候选元素的中位数
int med = medianThree(nums, left, (left + right) / 2, right); int med = medianThree(nums, left, (left + right) / 2, right);
// 将中位数交换至数组最左端 // 将中位数交换至数组最左端
swap(nums, left, med); swap(nums[left], nums[med]);
// 以 nums[left] 为基准数 // 以 nums[left] 为基准数
int i = left, j = right; int i = left, j = right;
while (i < j) { while (i < j) {
while (i < j && nums[j] >= nums[left]) while (i < j && nums[j] >= nums[left])
j--; // 从右向左找首个小于基准数的元素 j--; // 从右向左找首个小于基准数的元素
while (i < j && nums[i] <= nums[left]) while (i < j && nums[i] <= nums[left])
i++; // 从左向右找首个大于基准数的元素 i++; // 从左向右找首个大于基准数的元素
swap(nums, i, j); // 交换这两个元素 swap(nums[i], nums[j]); // 交换这两个元素
} }
swap(nums, i, left); // 将基准数交换至两子数组的分界线 swap(nums[i], nums[left]); // 将基准数交换至两子数组的分界线
return i; // 返回基准数的索引 return i; // 返回基准数的索引
} }
``` ```

@ -665,7 +665,7 @@ comments: true
### 2. &nbsp; 完全二叉树 ### 2. &nbsp; 完全二叉树
如图 7-5 所示,<u>完全二叉树complete binary tree</u>只有最底层的节点未被填满,且最底层节点尽量靠左填充。请注意,完美二叉树也是一棵完全二叉树。
![完全二叉树](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" } ![完全二叉树](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" }

@ -4,7 +4,7 @@ comments: true
# 2.3 &nbsp; Time complexity # 2.3 &nbsp; Time complexity
The runtime can intuitively assess the efficiency of an algorithm. How can we accurately estimate the runtime of an algorithm?
1. **Determining the Running Platform**: This includes hardware configuration, programming language, system environment, etc., all of which can affect the efficiency of code execution. 1. **Determining the Running Platform**: This includes hardware configuration, programming language, system environment, etc., all of which can affect the efficiency of code execution.
2. **Evaluating the Run Time for Various Computational Operations**: For instance, an addition operation `+` might take 1 ns, a multiplication operation `*` might take 10 ns, a print operation `print()` might take 5 ns, etc. 2. **Evaluating the Run Time for Various Computational Operations**: For instance, an addition operation `+` might take 1 ns, a multiplication operation `*` might take 10 ns, a print operation `print()` might take 5 ns, etc.

@ -4,7 +4,7 @@ comments: true
# 9.2 &nbsp; Basic operations on graphs # 9.2 &nbsp; Basic operations on graphs
The basic operations on graphs can be divided into operations on "edges" and operations on "vertices". Under the two representation methods of "adjacency matrix" and "adjacency list", the implementations are different.
## 9.2.1 &nbsp; Implementation based on adjacency matrix ## 9.2.1 &nbsp; Implementation based on adjacency matrix

@ -4,7 +4,7 @@ comments: true
# 9.3 &nbsp; Graph traversal # 9.3 &nbsp; Graph traversal
Trees represent a "one-to-many" relationship, while graphs have a higher degree of freedom and can represent any "many-to-many" relationship. Therefore, we can consider trees as a special case of graphs. Clearly, **tree traversal operations are also a special case of graph traversal operations**.
Both graphs and trees require the application of search algorithms to implement traversal operations. Graph traversal can be divided into two types: <u>Breadth-First Search (BFS)</u> and <u>Depth-First Search (DFS)</u>. Both graphs and trees require the application of search algorithms to implement traversal operations. Graph traversal can be divided into two types: <u>Breadth-First Search (BFS)</u> and <u>Depth-First Search (DFS)</u>.
@ -18,7 +18,7 @@ Both graphs and trees require the application of search algorithms to implement
### 1. &nbsp; Algorithm implementation ### 1. &nbsp; Algorithm implementation
BFS is usually implemented with the help of a queue, as shown in the code below. The queue is "first in, first out", which aligns with the BFS idea of traversing "from near to far".
1. Add the starting vertex `startVet` to the queue and start the loop. 1. Add the starting vertex `startVet` to the queue and start the loop.
2. In each iteration of the loop, pop the vertex at the front of the queue and record it as visited, then add all adjacent vertices of that vertex to the back of the queue. 2. In each iteration of the loop, pop the vertex at the front of the queue and record it as visited, then add all adjacent vertices of that vertex to the back of the queue.
@ -184,7 +184,7 @@ To prevent revisiting vertices, we use a hash set `visited` to record which node
[class]{}-[func]{graphBFS} [class]{}-[func]{graphBFS}
``` ```
The code is relatively abstract; you can compare it with Figure 9-10 to get a better understanding.
=== "<1>" === "<1>"
![Steps of breadth-first search of a graph](graph_traversal.assets/graph_bfs_step1.png){ class="animation-figure" } ![Steps of breadth-first search of a graph](graph_traversal.assets/graph_bfs_step1.png){ class="animation-figure" }
@ -223,7 +223,7 @@ The code is relatively abstract, it is suggested to compare with Figure 9-10 to
!!! question "Is the sequence of breadth-first traversal unique?" !!! question "Is the sequence of breadth-first traversal unique?"
Not unique. Breadth-first traversal only requires traversing in a "from near to far" order, **and the traversal order of vertices at the same distance can be arbitrary**. For example, in Figure 9-10, the visit order of vertices $1$ and $3$ can be swapped, as can the order of vertices $2$, $4$, and $6$.
### 2. &nbsp; Complexity analysis ### 2. &nbsp; Complexity analysis
@ -233,7 +233,7 @@ The code is relatively abstract, it is suggested to compare with Figure 9-10 to
## 9.3.2 &nbsp; Depth-first search ## 9.3.2 &nbsp; Depth-first search
**Depth-first search is a traversal method that prioritizes going as far as possible and then backtracks when no further path is available**. As shown in Figure 9-11, starting from the top left vertex, visit some adjacent vertex of the current vertex until no further path is available, then return and continue until all vertices are traversed.
![Depth-first traversal of a graph](graph_traversal.assets/graph_dfs.png){ class="animation-figure" } ![Depth-first traversal of a graph](graph_traversal.assets/graph_dfs.png){ class="animation-figure" }

@ -4,7 +4,7 @@ comments: true
# 6.1 &nbsp; Hash table # 6.1 &nbsp; Hash table
A <u>hash table</u>, also known as a <u>hash map</u>, is a data structure that establishes a mapping between keys and values, enabling efficient element retrieval. Specifically, when we input a `key` into the hash table, we can retrieve the corresponding `value` in $O(1)$ time complexity.
As shown in Figure 6-1, given $n$ students, each student has two data fields: "Name" and "Student ID". If we want to implement a query function that takes a student ID as input and returns the corresponding name, we can use the hash table shown in Figure 6-1. As shown in Figure 6-1, given $n$ students, each student has two data fields: "Name" and "Student ID". If we want to implement a query function that takes a student ID as input and returns the corresponding name, we can use the hash table shown in Figure 6-1.
@ -14,9 +14,9 @@ As shown in Figure 6-1, given $n$ students, each student has two data fields: "N
In addition to hash tables, arrays and linked lists can also be used to implement query functionality, but the time complexity is different. Their efficiency is compared in Table 6-1: In addition to hash tables, arrays and linked lists can also be used to implement query functionality, but the time complexity is different. Their efficiency is compared in Table 6-1:
- **Inserting elements**: Simply append the element to the tail of the array (or linked list). The time complexity of this operation is $O(1)$. - **Inserting an element**: Simply append the element to the tail of the array (or linked list). The time complexity of this operation is $O(1)$.
- **Searching for elements**: As the array (or linked list) is unsorted, searching for an element requires traversing through all of the elements. The time complexity of this operation is $O(n)$. - **Searching for an element**: As the array (or linked list) is unsorted, searching for an element requires traversing through all of the elements. The time complexity of this operation is $O(n)$.
- **Deleting elements**: To remove an element, we first need to locate it. Then, we delete it from the array (or linked list). The time complexity of this operation is $O(n)$. - **Deleting an element**: To remove an element, we first need to locate it. Then, we delete it from the array (or linked list). The time complexity of this operation is $O(n)$.
<p align="center"> Table 6-1 &nbsp; Comparison of time efficiency for common operations </p> <p align="center"> Table 6-1 &nbsp; Comparison of time efficiency for common operations </p>
@ -30,7 +30,7 @@ In addition to hash tables, arrays and linked lists can also be used to implemen
</div> </div>
As observed, **the time complexity for operations (insertion, deletion, searching, and modification) in a hash table is $O(1)$**, which is highly efficient.
## 6.1.1 &nbsp; Common operations of hash table ## 6.1.1 &nbsp; Common operations of hash table
@ -66,7 +66,7 @@ Common operations of a hash table include: initialization, querying, adding key-
unordered_map<int, string> map; unordered_map<int, string> map;
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha"; map[12836] = "Xiao Ha";
map[15937] = "Xiao Luo"; map[15937] = "Xiao Luo";
map[16750] = "Xiao Suan"; map[16750] = "Xiao Suan";
@ -89,7 +89,7 @@ Common operations of a hash table include: initialization, querying, adding key-
Map<Integer, String> map = new HashMap<>(); Map<Integer, String> map = new HashMap<>();
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map.put(12836, "Xiao Ha"); map.put(12836, "Xiao Ha");
map.put(15937, "Xiao Luo"); map.put(15937, "Xiao Luo");
map.put(16750, "Xiao Suan"); map.put(16750, "Xiao Suan");
@ -111,7 +111,7 @@ Common operations of a hash table include: initialization, querying, adding key-
/* Initialize hash table */ /* Initialize hash table */
Dictionary<int, string> map = new() { Dictionary<int, string> map = new() {
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
{ 12836, "Xiao Ha" }, { 12836, "Xiao Ha" },
{ 15937, "Xiao Luo" }, { 15937, "Xiao Luo" },
{ 16750, "Xiao Suan" }, { 16750, "Xiao Suan" },
@ -135,7 +135,7 @@ Common operations of a hash table include: initialization, querying, adding key-
hmap := make(map[int]string) hmap := make(map[int]string)
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
hmap[12836] = "Xiao Ha" hmap[12836] = "Xiao Ha"
hmap[15937] = "Xiao Luo" hmap[15937] = "Xiao Luo"
hmap[16750] = "Xiao Suan" hmap[16750] = "Xiao Suan"
@ -158,7 +158,7 @@ Common operations of a hash table include: initialization, querying, adding key-
var map: [Int: String] = [:] var map: [Int: String] = [:]
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha" map[12836] = "Xiao Ha"
map[15937] = "Xiao Luo" map[15937] = "Xiao Luo"
map[16750] = "Xiao Suan" map[16750] = "Xiao Suan"
@ -202,7 +202,7 @@ Common operations of a hash table include: initialization, querying, adding key-
/* Initialize hash table */ /* Initialize hash table */
const map = new Map<number, string>(); const map = new Map<number, string>();
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map.set(12836, 'Xiao Ha'); map.set(12836, 'Xiao Ha');
map.set(15937, 'Xiao Luo'); map.set(15937, 'Xiao Luo');
map.set(16750, 'Xiao Suan'); map.set(16750, 'Xiao Suan');
@ -230,7 +230,7 @@ Common operations of a hash table include: initialization, querying, adding key-
Map<int, String> map = {}; Map<int, String> map = {};
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map[12836] = "Xiao Ha"; map[12836] = "Xiao Ha";
map[15937] = "Xiao Luo"; map[15937] = "Xiao Luo";
map[16750] = "Xiao Suan"; map[16750] = "Xiao Suan";
@ -255,7 +255,7 @@ Common operations of a hash table include: initialization, querying, adding key-
let mut map: HashMap<i32, String> = HashMap::new(); let mut map: HashMap<i32, String> = HashMap::new();
/* Add operation */ /* Add operation */
// Add key-value pair (key, value) to the hash table // Add key-value pair (key, value) to hash table
map.insert(12836, "Xiao Ha".to_string()); map.insert(12836, "Xiao Ha".to_string());
map.insert(15937, "Xiao Luo".to_string()); map.insert(15937, "Xiao Luo".to_string());
map.insert(16750, "Xiao Suan".to_string()); map.insert(16750, "Xiao Suan".to_string());
@ -502,10 +502,10 @@ First, let's consider the simplest case: **implementing a hash table using only
So, how do we locate the corresponding bucket based on the `key`? This is achieved through a <u>hash function</u>. The role of the hash function is to map a larger input space to a smaller output space. In a hash table, the input space consists of all the keys, and the output space consists of all the buckets (array indices). In other words, given a `key`, **we can use the hash function to determine the storage location of the corresponding key-value pair in the array**. So, how do we locate the corresponding bucket based on the `key`? This is achieved through a <u>hash function</u>. The role of the hash function is to map a larger input space to a smaller output space. In a hash table, the input space consists of all the keys, and the output space consists of all the buckets (array indices). In other words, given a `key`, **we can use the hash function to determine the storage location of the corresponding key-value pair in the array**.
With a given `key`, the calculation of the hash function consists of the following two steps:
1. Calculate the hash value by using a certain hash algorithm `hash()`. 1. Calculate the hash value by using a certain hash algorithm `hash()`.
2. Take the modulus of the hash value with the bucket count (array length) `capacity` to obtain the array `index` corresponding to the key.
```shell ```shell
index = hash(key) % capacity index = hash(key) % capacity

@ -69,13 +69,6 @@ After the pivot partitioning, the original array is divided into three parts: le
=== "C++" === "C++"
```cpp title="quick_sort.cpp" ```cpp title="quick_sort.cpp"
/* Swap elements */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* Partition */ /* Partition */
int partition(vector<int> &nums, int left, int right) { int partition(vector<int> &nums, int left, int right) {
// Use nums[left] as the pivot // Use nums[left] as the pivot

@ -15,15 +15,15 @@ As shown in Figure 7-16, a <u>binary search tree</u> satisfies the following con
## 7.4.1 &nbsp; Operations on a binary search tree ## 7.4.1 &nbsp; Operations on a binary search tree
We encapsulate the binary search tree as a class `BinarySearchTree` and declare a member variable `root` pointing to the tree's root node.
### 1. &nbsp; Searching for a node ### 1. &nbsp; Searching for a node
Given a target node value `num`, one can search according to the properties of the binary search tree. As shown in Figure 7-17, we declare a node `cur`, start from the binary tree's root node `root`, and loop to compare the size relationship between the node value `cur.val` and `num`.
- If `cur.val < num`, it means the target node is in `cur`'s right subtree, thus execute `cur = cur.right`. - If `cur.val < num`, it means the target node is in `cur`'s right subtree, thus execute `cur = cur.right`.
- If `cur.val > num`, it means the target node is in `cur`'s left subtree, thus execute `cur = cur.left`. - If `cur.val > num`, it means the target node is in `cur`'s left subtree, thus execute `cur = cur.left`.
- If `cur.val = num`, it means the target node is found, so exit the loop and return the node.
=== "<1>" === "<1>"
![Example of searching for a node in a binary search tree](binary_search_tree.assets/bst_search_step1.png){ class="animation-figure" } ![Example of searching for a node in a binary search tree](binary_search_tree.assets/bst_search_step1.png){ class="animation-figure" }
@ -39,7 +39,7 @@ Given a target node value `num`, one can search according to the properties of t
<p align="center"> Figure 7-17 &nbsp; Example of searching for a node in a binary search tree </p> <p align="center"> Figure 7-17 &nbsp; Example of searching for a node in a binary search tree </p>
The search operation in a binary search tree works on the same principle as the binary search algorithm, eliminating half of the cases in each round. The number of loops is at most the height of the binary tree. When the binary tree is balanced, it uses $O(\log n)$ time. The example code is as follows:
=== "Python" === "Python"
@ -177,8 +177,8 @@ The search operation in a binary search tree works on the same principle as the
Given an element `num` to be inserted, to maintain the property of the binary search tree "left subtree < root node < right subtree," the insertion operation proceeds as shown in Figure 7-18. Given an element `num` to be inserted, to maintain the property of the binary search tree "left subtree < root node < right subtree," the insertion operation proceeds as shown in Figure 7-18.
1. **Finding the insertion position**: Similar to the search operation, start from the root node and loop downwards according to the size relationship between the current node value and `num`, until the leaf node is passed (traversing to `None`), then exit the loop.
2. **Insert the node at this position**: Initialize the node `num` and place it where `None` was.
![Inserting a node into a binary search tree](binary_search_tree.assets/bst_insert.png){ class="animation-figure" } ![Inserting a node into a binary search tree](binary_search_tree.assets/bst_insert.png){ class="animation-figure" }
@ -186,8 +186,8 @@ Given an element `num` to be inserted, to maintain the property of the binary se
In the code implementation, note the following two points. In the code implementation, note the following two points.
- The binary search tree does not allow duplicate nodes; otherwise, its definition would be violated. Therefore, if the node to be inserted already exists in the tree, the insertion is not performed, and it returns directly.
- To perform the insertion operation, we need to use the node `pre` to save the node from the previous loop. This way, when traversing to `None`, we can get its parent node, thus completing the node insertion operation.
=== "Python" === "Python"
@ -355,9 +355,9 @@ Similar to searching for a node, inserting a node uses $O(\log n)$ time.
### 3. &nbsp; Removing a node ### 3. &nbsp; Removing a node
First, find the target node in the binary tree, then remove it. Similar to inserting a node, we need to ensure that after the removal operation is completed, the property of the binary search tree "left subtree < root node < right subtree" is still satisfied. Therefore, based on the number of child nodes of the target node, we divide it into 0, 1, and 2 cases, performing the corresponding node removal operations. First, find the target node in the binary tree, then remove it. Similar to inserting a node, we need to ensure that after the removal operation is completed, the property of the binary search tree "left subtree < root node < right subtree" is still satisfied. Therefore, based on the number of child nodes of the target node, we divide it into three cases: 0, 1, and 2, and perform the corresponding node removal operations.
As shown in Figure 7-19, when the degree of the node to be removed is $0$, it means the node is a leaf node, and it can be directly removed. As shown in Figure 7-19, when the degree of the node to be removed is $0$, it means the node is a leaf node and can be directly removed.
![Removing a node in a binary search tree (degree 0)](binary_search_tree.assets/bst_remove_case1.png){ class="animation-figure" } ![Removing a node in a binary search tree (degree 0)](binary_search_tree.assets/bst_remove_case1.png){ class="animation-figure" }
@ -623,9 +623,9 @@ The operation of removing a node also uses $O(\log n)$ time, where finding the n
### 4. &nbsp; In-order traversal is ordered ### 4. &nbsp; In-order traversal is ordered
As shown in Figure 7-22, the in-order traversal of a binary tree follows the "left $\rightarrow$ root $\rightarrow$ right" traversal order, and a binary search tree satisfies the size relationship "left child node $<$ root node $<$ right child node". As shown in Figure 7-22, the in-order traversal of a binary tree follows the traversal order of "left $\rightarrow$ root $\rightarrow$ right," and a binary search tree satisfies the size relationship of "left child node $<$ root node $<$ right child node."
This means that in-order traversal in a binary search tree always traverses the next smallest node first, thus deriving an important property: **The in-order traversal sequence of a binary search tree is ascending**. This means that when performing in-order traversal in a binary search tree, the next smallest node will always be traversed first, thus leading to an important property: **The sequence of in-order traversal in a binary search tree is ascending**.
Using the ascending property of in-order traversal, obtaining ordered data in a binary search tree requires only $O(n)$ time, without the need for additional sorting operations, which is very efficient. Using the ascending property of in-order traversal, obtaining ordered data in a binary search tree requires only $O(n)$ time, without the need for additional sorting operations, which is very efficient.
@ -635,7 +635,7 @@ Using the ascending property of in-order traversal, obtaining ordered data in a
## 7.4.2 &nbsp; Efficiency of binary search trees ## 7.4.2 &nbsp; Efficiency of binary search trees
Given a set of data, we consider using an array or a binary search tree for storage. Observing Table 7-2, the operations on a binary search tree all have logarithmic time complexity, which is stable and efficient. Only in scenarios of high-frequency addition and low-frequency search and removal, arrays are more efficient than binary search trees. Given a set of data, we consider using an array or a binary search tree for storage. Observing Table 7-2, the operations on a binary search tree all have logarithmic time complexity, which is stable and efficient. Arrays are more efficient than binary search trees only in scenarios involving frequent additions and infrequent searches or removals.
<p align="center"> Table 7-2 &nbsp; Efficiency comparison between arrays and search trees </p> <p align="center"> Table 7-2 &nbsp; Efficiency comparison between arrays and search trees </p>
@ -649,9 +649,9 @@ Given a set of data, we consider using an array or a binary search tree for stor
</div> </div>
In ideal conditions, the binary search tree is "balanced," thus any node can be found within $\log n$ loops. Ideally, the binary search tree is "balanced," allowing any node to be found within $\log n$ loops.
However, continuously inserting and removing nodes in a binary search tree may lead to the binary tree degenerating into a chain list as shown in Figure 7-23, at which point the time complexity of various operations also degrades to $O(n)$. However, if we continuously insert and remove nodes in a binary search tree, it may degenerate into a linked list as shown in Figure 7-23, where the time complexity of various operations also degrades to $O(n)$.
![Degradation of a binary search tree](binary_search_tree.assets/bst_degradation.png){ class="animation-figure" } ![Degradation of a binary search tree](binary_search_tree.assets/bst_degradation.png){ class="animation-figure" }

@ -4,15 +4,15 @@ comments: true
# 7.2 &nbsp; Binary tree traversal # 7.2 &nbsp; Binary tree traversal
From the perspective of physical structure, a tree is a data structure based on linked lists, hence its traversal method involves accessing nodes one by one through pointers. However, a tree is a non-linear data structure, which makes traversing a tree more complex than traversing a linked list, requiring the assistance of search algorithms to achieve. From a physical structure perspective, a tree is a data structure based on linked lists. Hence, its traversal method involves accessing nodes one by one through pointers. However, a tree is a non-linear data structure, which makes traversing a tree more complex than traversing a linked list, requiring the assistance of search algorithms.
Common traversal methods for binary trees include level-order traversal, pre-order traversal, in-order traversal, and post-order traversal, among others. The common traversal methods for binary trees include level-order traversal, pre-order traversal, in-order traversal, and post-order traversal.
## 7.2.1 &nbsp; Level-order traversal ## 7.2.1 &nbsp; Level-order traversal
As shown in Figure 7-9, <u>level-order traversal</u> traverses the binary tree from top to bottom, layer by layer, and accesses nodes in each layer in a left-to-right order. As shown in Figure 7-9, <u>level-order traversal</u> traverses the binary tree from top to bottom, layer by layer. Within each level, it visits nodes from left to right.
Level-order traversal essentially belongs to <u>breadth-first traversal</u>, also known as <u>breadth-first search (BFS)</u>, which embodies a "circumferentially outward expanding" layer-by-layer traversal method. Level-order traversal is essentially a type of <u>breadth-first traversal</u>, also known as <u>breadth-first search (BFS)</u>, which embodies a "circumferentially outward expanding" layer-by-layer traversal method.
![Level-order traversal of a binary tree](binary_tree_traversal.assets/binary_tree_bfs.png){ class="animation-figure" } ![Level-order traversal of a binary tree](binary_tree_traversal.assets/binary_tree_bfs.png){ class="animation-figure" }
@ -155,14 +155,14 @@ Breadth-first traversal is usually implemented with the help of a "queue". The q
### 2. &nbsp; Complexity analysis ### 2. &nbsp; Complexity analysis
- **Time complexity is $O(n)$**: All nodes are visited once, using $O(n)$ time, where $n$ is the number of nodes. - **Time complexity is $O(n)$**: All nodes are visited once, taking $O(n)$ time, where $n$ is the number of nodes.
- **Space complexity is $O(n)$**: In the worst case, i.e., a full binary tree, before traversing to the lowest level, the queue can contain at most $(n + 1) / 2$ nodes at the same time, occupying $O(n)$ space. - **Space complexity is $O(n)$**: In the worst case, i.e., a full binary tree, before traversing to the bottom level, the queue can contain at most $(n + 1) / 2$ nodes simultaneously, occupying $O(n)$ space.
## 7.2.2 &nbsp; Preorder, in-order, and post-order traversal ## 7.2.2 &nbsp; Preorder, in-order, and post-order traversal
Correspondingly, pre-order, in-order, and post-order traversal all belong to <u>depth-first traversal</u>, also known as <u>depth-first search (DFS)</u>, which embodies a "proceed to the end first, then backtrack and continue" traversal method. Correspondingly, pre-order, in-order, and post-order traversal all belong to <u>depth-first traversal</u>, also known as <u>depth-first search (DFS)</u>, which embodies a "proceed to the end first, then backtrack and continue" traversal method.
Figure 7-10 shows the working principle of performing a depth-first traversal on a binary tree. **Depth-first traversal is like walking around the perimeter of the entire binary tree**, encountering three positions at each node, corresponding to pre-order traversal, in-order traversal, and post-order traversal. Figure 7-10 shows the working principle of performing a depth-first traversal on a binary tree. **Depth-first traversal is like "walking" around the entire binary tree**, encountering three positions at each node, corresponding to pre-order, in-order, and post-order traversal.
![Preorder, in-order, and post-order traversal of a binary search tree](binary_tree_traversal.assets/binary_tree_dfs.png){ class="animation-figure" } ![Preorder, in-order, and post-order traversal of a binary search tree](binary_tree_traversal.assets/binary_tree_dfs.png){ class="animation-figure" }
@ -428,4 +428,4 @@ Figure 7-11 shows the recursive process of pre-order traversal of a binary tree,
### 2. &nbsp; Complexity analysis ### 2. &nbsp; Complexity analysis
- **Time complexity is $O(n)$**: All nodes are visited once, using $O(n)$ time. - **Time complexity is $O(n)$**: All nodes are visited once, using $O(n)$ time.
- **Space complexity is $O(n)$**: In the worst case, i.e., the tree degrades into a linked list, the recursion depth reaches $n$, the system occupies $O(n)$ stack frame space. - **Space complexity is $O(n)$**: In the worst case, i.e., the tree degenerates into a linked list, the recursion depth reaches $n$, the system occupies $O(n)$ stack frame space.

@ -1443,8 +1443,8 @@ comments: true
/* 雙向鏈結串列節點類別 */ /* 雙向鏈結串列節點類別 */
class ListNode { class ListNode {
int val; // 節點值 int val; // 節點值
ListNode next; // 指向後繼節點的引用 ListNode? next; // 指向後繼節點的引用
ListNode prev; // 指向前驅節點的引用 ListNode? prev; // 指向前驅節點的引用
ListNode(this.val, [this.next, this.prev]); // 建構子 ListNode(this.val, [this.next, this.prev]); // 建構子
} }
``` ```

@ -1721,7 +1721,7 @@ comments: true
remove(index) { remove(index) {
if (index < 0 || index >= this.#size) throw new Error('索引越界'); if (index < 0 || index >= this.#size) throw new Error('索引越界');
let num = this.#arr[index]; let num = this.#arr[index];
// 將索引 index 之後的元素都向前移動一位 // 將索引 index 之後的元素都向前移動一位
for (let j = index; j < this.#size - 1; j++) { for (let j = index; j < this.#size - 1; j++) {
this.#arr[j] = this.#arr[j + 1]; this.#arr[j] = this.#arr[j + 1];
} }

@ -29,7 +29,7 @@ comments: true
鏈結串列由節點組成,節點之間透過引用(指標)連線,各個節點可以儲存不同型別的資料,例如 `int`、`double`、`string`、`object` 等。 鏈結串列由節點組成,節點之間透過引用(指標)連線,各個節點可以儲存不同型別的資料,例如 `int`、`double`、`string`、`object` 等。
相對地,陣列元素則必須是相同型別的,這樣才能透過計算偏移量來獲取對應元素位置。例如,陣列同時包含 `int``long` 兩種型別,單個元素分別佔用 4 位元組 和 8 位元組 ,此時就不能用以下公式計算偏移量了,因為陣列中包含了兩種“元素長度”。 相對地,陣列元素則必須是相同型別的,這樣才能透過計算偏移量來獲取對應元素位置。例如,陣列同時包含 `int``long` 兩種型別,單個元素分別佔用 4 位元組和 8 位元組 ,此時就不能用以下公式計算偏移量了,因為陣列中包含了兩種“元素長度”。
```shell ```shell
# 元素記憶體位址 = 陣列記憶體位址(首元素記憶體位址) + 元素長度 * 元素索引 # 元素記憶體位址 = 陣列記憶體位址(首元素記憶體位址) + 元素長度 * 元素索引

@ -532,11 +532,7 @@ comments: true
) { ) {
// 當放置完所有行時,記錄解 // 當放置完所有行時,記錄解
if row == n { if row == n {
let mut copy_state: Vec<Vec<String>> = Vec::new(); res.push(state.clone());
for s_row in state.clone() {
copy_state.push(s_row);
}
res.push(copy_state);
return; return;
} }
// 走訪所有列 // 走訪所有列
@ -547,12 +543,12 @@ comments: true
// 剪枝:不允許該格子所在列、主對角線、次對角線上存在皇后 // 剪枝:不允許該格子所在列、主對角線、次對角線上存在皇后
if !cols[col] && !diags1[diag1] && !diags2[diag2] { if !cols[col] && !diags1[diag1] && !diags2[diag2] {
// 嘗試:將皇后放置在該格子 // 嘗試:將皇后放置在該格子
state.get_mut(row).unwrap()[col] = "Q".into(); state[row][col] = "Q".into();
(cols[col], diags1[diag1], diags2[diag2]) = (true, true, true); (cols[col], diags1[diag1], diags2[diag2]) = (true, true, true);
// 放置下一行 // 放置下一行
backtrack(row + 1, n, state, res, cols, diags1, diags2); backtrack(row + 1, n, state, res, cols, diags1, diags2);
// 回退:將該格子恢復為空位 // 回退:將該格子恢復為空位
state.get_mut(row).unwrap()[col] = "#".into(); state[row][col] = "#".into();
(cols[col], diags1[diag1], diags2[diag2]) = (false, false, false); (cols[col], diags1[diag1], diags2[diag2]) = (false, false, false);
} }
} }
@ -561,14 +557,7 @@ comments: true
/* 求解 n 皇后 */ /* 求解 n 皇后 */
fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> { fn n_queens(n: usize) -> Vec<Vec<Vec<String>>> {
// 初始化 n*n 大小的棋盤,其中 'Q' 代表皇后,'#' 代表空位 // 初始化 n*n 大小的棋盤,其中 'Q' 代表皇后,'#' 代表空位
let mut state: Vec<Vec<String>> = Vec::new(); let mut state: Vec<Vec<String>> = vec![vec!["#".to_string(); n]; n];
for _ in 0..n {
let mut row: Vec<String> = Vec::new();
for _ in 0..n {
row.push("#".into());
}
state.push(row);
}
let mut cols = vec![false; n]; // 記錄列是否有皇后 let mut cols = vec![false; n]; // 記錄列是否有皇后
let mut diags1 = vec![false; 2 * n - 1]; // 記錄主對角線上是否有皇后 let mut diags1 = vec![false; 2 * n - 1]; // 記錄主對角線上是否有皇后
let mut diags2 = vec![false; 2 * n - 1]; // 記錄次對角線上是否有皇后 let mut diags2 = vec![false; 2 * n - 1]; // 記錄次對角線上是否有皇后

@ -355,7 +355,7 @@ comments: true
```rust title="subset_sum_i_naive.rs" ```rust title="subset_sum_i_naive.rs"
/* 回溯演算法:子集和 I */ /* 回溯演算法:子集和 I */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
total: i32, total: i32,
choices: &[i32], choices: &[i32],
@ -363,7 +363,7 @@ comments: true
) { ) {
// 子集和等於 target 時,記錄解 // 子集和等於 target 時,記錄解
if total == target { if total == target {
res.push(state); res.push(state.clone());
return; return;
} }
// 走訪所有選擇 // 走訪所有選擇
@ -375,7 +375,7 @@ comments: true
// 嘗試:做出選擇,更新元素和 total // 嘗試:做出選擇,更新元素和 total
state.push(choices[i]); state.push(choices[i]);
// 進行下一輪選擇 // 進行下一輪選擇
backtrack(state.clone(), target, total + choices[i], choices, res); backtrack(state, target, total + choices[i], choices, res);
// 回退:撤銷選擇,恢復到之前的狀態 // 回退:撤銷選擇,恢復到之前的狀態
state.pop(); state.pop();
} }
@ -383,10 +383,10 @@ comments: true
/* 求解子集和 I包含重複子集 */ /* 求解子集和 I包含重複子集 */
fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_i_naive(nums: &[i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集) let mut state = Vec::new(); // 狀態(子集)
let total = 0; // 子集和 let total = 0; // 子集和
let mut res = Vec::new(); // 結果串列(子集串列) let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, total, nums, &mut res); backtrack(&mut state, target, total, nums, &mut res);
res res
} }
``` ```
@ -912,7 +912,7 @@ comments: true
```rust title="subset_sum_i.rs" ```rust title="subset_sum_i.rs"
/* 回溯演算法:子集和 I */ /* 回溯演算法:子集和 I */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
choices: &[i32], choices: &[i32],
start: usize, start: usize,
@ -920,7 +920,7 @@ comments: true
) { ) {
// 子集和等於 target 時,記錄解 // 子集和等於 target 時,記錄解
if target == 0 { if target == 0 {
res.push(state); res.push(state.clone());
return; return;
} }
// 走訪所有選擇 // 走訪所有選擇
@ -934,7 +934,7 @@ comments: true
// 嘗試:做出選擇,更新 target, start // 嘗試:做出選擇,更新 target, start
state.push(choices[i]); state.push(choices[i]);
// 進行下一輪選擇 // 進行下一輪選擇
backtrack(state.clone(), target - choices[i], choices, i, res); backtrack(state, target - choices[i], choices, i, res);
// 回退:撤銷選擇,恢復到之前的狀態 // 回退:撤銷選擇,恢復到之前的狀態
state.pop(); state.pop();
} }
@ -942,11 +942,11 @@ comments: true
/* 求解子集和 I */ /* 求解子集和 I */
fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_i(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集) let mut state = Vec::new(); // 狀態(子集)
nums.sort(); // 對 nums 進行排序 nums.sort(); // 對 nums 進行排序
let start = 0; // 走訪起始點 let start = 0; // 走訪起始點
let mut res = Vec::new(); // 結果串列(子集串列) let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, nums, start, &mut res); backtrack(&mut state, target, nums, start, &mut res);
res res
} }
``` ```
@ -1512,7 +1512,7 @@ comments: true
```rust title="subset_sum_ii.rs" ```rust title="subset_sum_ii.rs"
/* 回溯演算法:子集和 II */ /* 回溯演算法:子集和 II */
fn backtrack( fn backtrack(
mut state: Vec<i32>, state: &mut Vec<i32>,
target: i32, target: i32,
choices: &[i32], choices: &[i32],
start: usize, start: usize,
@ -1520,7 +1520,7 @@ comments: true
) { ) {
// 子集和等於 target 時,記錄解 // 子集和等於 target 時,記錄解
if target == 0 { if target == 0 {
res.push(state); res.push(state.clone());
return; return;
} }
// 走訪所有選擇 // 走訪所有選擇
@ -1539,7 +1539,7 @@ comments: true
// 嘗試:做出選擇,更新 target, start // 嘗試:做出選擇,更新 target, start
state.push(choices[i]); state.push(choices[i]);
// 進行下一輪選擇 // 進行下一輪選擇
backtrack(state.clone(), target - choices[i], choices, i, res); backtrack(state, target - choices[i], choices, i + 1, res);
// 回退:撤銷選擇,恢復到之前的狀態 // 回退:撤銷選擇,恢復到之前的狀態
state.pop(); state.pop();
} }
@ -1547,11 +1547,11 @@ comments: true
/* 求解子集和 II */ /* 求解子集和 II */
fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> { fn subset_sum_ii(nums: &mut [i32], target: i32) -> Vec<Vec<i32>> {
let state = Vec::new(); // 狀態(子集) let mut state = Vec::new(); // 狀態(子集)
nums.sort(); // 對 nums 進行排序 nums.sort(); // 對 nums 進行排序
let start = 0; // 走訪起始點 let start = 0; // 走訪起始點
let mut res = Vec::new(); // 結果串列(子集串列) let mut res = Vec::new(); // 結果串列(子集串列)
backtrack(state, target, nums, start, &mut res); backtrack(&mut state, target, nums, start, &mut res);
res res
} }
``` ```

@ -54,7 +54,7 @@ $$
1. 初始狀態下,指標 $i$ 和 $j$ 分列陣列兩端。 1. 初始狀態下,指標 $i$ 和 $j$ 分列陣列兩端。
2. 計算當前狀態的容量 $cap[i, j]$ ,並更新最大容量。 2. 計算當前狀態的容量 $cap[i, j]$ ,並更新最大容量。
3. 比較板 $i$ 和 板 $j$ 的高度,並將短板向內移動一格。 3. 比較板 $i$ 和板 $j$ 的高度,並將短板向內移動一格。
4. 迴圈執行第 `2.` 步和第 `3.` 步,直至 $i$ 和 $j$ 相遇時結束。 4. 迴圈執行第 `2.` 步和第 `3.` 步,直至 $i$ 和 $j$ 相遇時結束。
=== "<1>" === "<1>"

@ -12,7 +12,7 @@ comments: true
1. 翻開字典約一半的頁數,檢視該頁的首字母是什麼,假設首字母為 $m$ 。 1. 翻開字典約一半的頁數,檢視該頁的首字母是什麼,假設首字母為 $m$ 。
2. 由於在拼音字母表中 $r$ 位於 $m$ 之後,所以排除字典前半部分,查詢範圍縮小到後半部分。 2. 由於在拼音字母表中 $r$ 位於 $m$ 之後,所以排除字典前半部分,查詢範圍縮小到後半部分。
3. 不斷重複步驟 `1.` 步驟 `2.` ,直至找到拼音首字母為 $r$ 的頁碼為止。 3. 不斷重複步驟 `1.` 和步驟 `2.` ,直至找到拼音首字母為 $r$ 的頁碼為止。
=== "<1>" === "<1>"
![查字典步驟](algorithms_are_everywhere.assets/binary_search_dictionary_step1.png){ class="animation-figure" } ![查字典步驟](algorithms_are_everywhere.assets/binary_search_dictionary_step1.png){ class="animation-figure" }

@ -69,26 +69,19 @@ comments: true
=== "C++" === "C++"
```cpp title="quick_sort.cpp" ```cpp title="quick_sort.cpp"
/* 元素交換 */
void swap(vector<int> &nums, int i, int j) {
int tmp = nums[i];
nums[i] = nums[j];
nums[j] = tmp;
}
/* 哨兵劃分 */ /* 哨兵劃分 */
int partition(vector<int> &nums, int left, int right) { int partition(vector<int> &nums, int left, int right) {
// 以 nums[left] 為基準數 // 以 nums[left] 為基準數
int i = left, j = right; int i = left, j = right;
while (i < j) { while (i < j) {
while (i < j && nums[j] >= nums[left]) while (i < j && nums[j] >= nums[left])
j--; // 從右向左找首個小於基準數的元素 j--; // 從右向左找首個小於基準數的元素
while (i < j && nums[i] <= nums[left]) while (i < j && nums[i] <= nums[left])
i++; // 從左向右找首個大於基準數的元素 i++; // 從左向右找首個大於基準數的元素
swap(nums, i, j); // 交換這兩個元素 swap(nums[i], nums[j]); // 交換這兩個元素
} }
swap(nums, i, left); // 將基準數交換至兩子陣列的分界線 swap(nums[i], nums[left]); // 將基準數交換至兩子陣列的分界線
return i; // 返回基準數的索引 return i; // 返回基準數的索引
} }
``` ```
@ -721,18 +714,18 @@ comments: true
// 選取三個候選元素的中位數 // 選取三個候選元素的中位數
int med = medianThree(nums, left, (left + right) / 2, right); int med = medianThree(nums, left, (left + right) / 2, right);
// 將中位數交換至陣列最左端 // 將中位數交換至陣列最左端
swap(nums, left, med); swap(nums[left], nums[med]);
// 以 nums[left] 為基準數 // 以 nums[left] 為基準數
int i = left, j = right; int i = left, j = right;
while (i < j) { while (i < j) {
while (i < j && nums[j] >= nums[left]) while (i < j && nums[j] >= nums[left])
j--; // 從右向左找首個小於基準數的元素 j--; // 從右向左找首個小於基準數的元素
while (i < j && nums[i] <= nums[left]) while (i < j && nums[i] <= nums[left])
i++; // 從左向右找首個大於基準數的元素 i++; // 從左向右找首個大於基準數的元素
swap(nums, i, j); // 交換這兩個元素 swap(nums[i], nums[j]); // 交換這兩個元素
} }
swap(nums, i, left); // 將基準數交換至兩子陣列的分界線 swap(nums[i], nums[left]); // 將基準數交換至兩子陣列的分界線
return i; // 返回基準數的索引 return i; // 返回基準數的索引
} }
``` ```

@ -665,7 +665,7 @@ comments: true
### 2. &nbsp; 完全二元樹 ### 2. &nbsp; 完全二元樹
如圖 7-5 所示,<u>完全二元樹complete binary tree</u>只有最底層的節點未被填滿,且最底層節點儘量靠左填充。 如圖 7-5 所示,<u>完全二元樹complete binary tree</u>只有最底層的節點未被填滿,且最底層節點儘量靠左填充。請注意,完美二元樹也是一棵完全二元樹。
![完全二元樹](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" } ![完全二元樹](binary_tree.assets/complete_binary_tree.png){ class="animation-figure" }

Loading…
Cancel
Save