
Stanford University ICPC Team Notebook (2015-16)

Contents

1  Combinatorial optimization
   1.1  Sparse max-flow
   1.2  Min-cost max-flow
   1.3  Push-relabel max-flow
   1.4  Min-cost matching
   1.5  Max bipartite matching
   1.6  Global min-cut
   1.7  Graph cut inference

2  Geometry
   2.1  Convex hull
   2.2  Miscellaneous geometry
   2.3  Java geometry
   2.4  3D geometry
   2.5  Slow Delaunay triangulation

3  Numerical algorithms
   3.1  Number theory (modular, Chinese remainder, linear Diophantine)
   3.2  Systems of linear equations, matrix inverse, determinant
   3.3  Reduced row echelon form, matrix rank
   3.4  Fast Fourier transform
   3.5  Simplex algorithm

4  Graph algorithms
   4.1  Fast Dijkstra's algorithm
   4.2  Strongly connected components
   4.3  Eulerian path

5  Data structures
   5.1  Suffix array
   5.2  Binary Indexed Tree
   5.3  Union-find set
   5.4  KD-tree
   5.5  Splay tree
   5.6  Lazy segment tree
   5.7  Lowest common ancestor


1 Combinatorial optimization

1.1 Sparse max-flow

// Adjacency list implementation of Dinic's blocking flow algorithm.
// This is very fast in practice, and only loses to push-relabel flow.
//
// Running time:
//     O(|V|^2 |E|)
//
// INPUT:
//     - graph, constructed using AddEdge()
//     - source and sink
//
// OUTPUT:
//     - maximum flow value
//     - To obtain actual flow values, look at edges with capacity > 0
//       (zero capacity edges are residual edges).

#include <cstdio>
#include <iostream>
#include <vector>

using namespace std;
typedef long long LL;

struct Edge {
  int from, to, cap, flow, index;
  Edge(int from, int to, int cap, int flow, int index) :
    from(from), to(to), cap(cap), flow(flow), index(index) {}
  LL rcap() { return cap - flow; }
};

struct Dinic {
  int N;
  vector<vector<Edge> > G;
  vector<vector<Edge *> > Lf;
  vector<int> layer;
  vector<int> Q;

  Dinic(int N) : N(N), G(N), Q(N) {}

  void AddEdge(int from, int to, int cap) {
    if (from == to) return;
    G[from].push_back(Edge(from, to, cap, 0, G[to].size()));
    G[to].push_back(Edge(to, from, 0, 0, G[from].size() - 1));
  }

  LL BlockingFlow(int s, int t) {
    layer.clear(); layer.resize(N, -1);
    layer[s] = 0;
    Lf.clear(); Lf.resize(N);

    int head = 0, tail = 0;
    Q[tail++] = s;
    while (head < tail) {
      int x = Q[head++];
      for (int i = 0; i < G[x].size(); i++) {
        Edge &e = G[x][i]; if (e.rcap() <= 0) continue;
        if (layer[e.to] == -1) {
          layer[e.to] = layer[e.from] + 1;
          Q[tail++] = e.to;
        }
        if (layer[e.to] > layer[e.from]) {
          Lf[e.from].push_back(&e);
        }
      }
    }
    if (layer[t] == -1) return 0;

    LL totflow = 0;
    vector<Edge *> P;
    while (!Lf[s].empty()) {
      int curr = P.empty() ? s : P.back()->to;
      if (curr == t) { // Augment
        LL amt = P.front()->rcap();
        for (int i = 0; i < P.size(); ++i) {
          amt = min(amt, P[i]->rcap());
        }
        totflow += amt;
        for (int i = P.size() - 1; i >= 0; --i) {
          P[i]->flow += amt;
          G[P[i]->to][P[i]->index].flow -= amt;
          if (P[i]->rcap() <= 0) {
            Lf[P[i]->from].pop_back();
            P.resize(i);
          }
        }
      } else if (Lf[curr].empty()) { // Retreat
        P.pop_back();
        for (int i = 0; i < N; ++i)
          for (int j = 0; j < Lf[i].size(); ++j)
            if (Lf[i][j]->to == curr)
              Lf[i].erase(Lf[i].begin() + j);
      } else { // Advance
        P.push_back(Lf[curr].back());
      }
    }
    return totflow;
  }

  LL GetMaxFlow(int s, int t) {
    LL totflow = 0;
    while (LL flow = BlockingFlow(s, t))
      totflow += flow;
    return totflow;
  }
};

// BEGIN CUT
// The following code solves SPOJ problem #4110: Fast Maximum Flow (FASTFLOW)

int main() {
  int n, m;
  scanf("%d%d", &n, &m);

  Dinic flow(n);
  for (int i = 0; i < m; i++) {
    int a, b, c;
    scanf("%d%d%d", &a, &b, &c);
    if (a == b) continue;
    flow.AddEdge(a-1, b-1, c);
    flow.AddEdge(b-1, a-1, c);
  }
  printf("%lld\n", flow.GetMaxFlow(0, n-1));
  return 0;
}

// END CUT
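The short driver below is a minimal usage sketch, not part of the original notebook: it assumes the Dinic struct above is pasted into the same file, and the 4-node graph, its capacities, and the helper name demo_sparse_max_flow are illustrative only.

// Directed graph: 0->1 (cap 3), 0->2 (cap 2), 1->3 (cap 2), 2->3 (cap 3).
// The maximum flow from 0 to 3 pushes 2 units along each of the two paths, value 4.
int demo_sparse_max_flow() {
  Dinic dinic(4);
  dinic.AddEdge(0, 1, 3);
  dinic.AddEdge(0, 2, 2);
  dinic.AddEdge(1, 3, 2);
  dinic.AddEdge(2, 3, 3);
  printf("%lld\n", dinic.GetMaxFlow(0, 3));  // prints 4
  return 0;
}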

1.2 Min-cost max-flow

// Implementation of min cost max flow algorithm using adjacency
// matrix (Edmonds and Karp 1972). This implementation keeps track of
// forward and reverse edges separately (so you can set cap[i][j] !=
// cap[j][i]). For a regular max flow, set all edge costs to 0.
//
// Running time, O(|V|^2) cost per augmentation
//     max flow:           O(|V|^3) augmentations
//     min cost max flow:  O(|V|^4 * MAX_EDGE_COST) augmentations
//
// INPUT:
//     - graph, constructed using AddEdge()
//     - source
//     - sink
//
// OUTPUT:
//     - (maximum flow value, minimum cost value)
//     - To obtain the actual flow, look at positive values only.

#include <cmath>
#include <cstdio>
#include <vector>
#include <iostream>
#include <limits>

using namespace std;

typedef vector<int> VI;
typedef vector<VI> VVI;
typedef long long L;
typedef vector<L> VL;
typedef vector<VL> VVL;
typedef pair<int, int> PII;
typedef vector<PII> VPII;

const L INF = numeric_limits<L>::max() / 4;

struct MinCostMaxFlow {
  int N;
  VVL cap, flow, cost;
  VI found;
  VL dist, pi, width;
  VPII dad;

  MinCostMaxFlow(int N) :
    N(N), cap(N, VL(N)), flow(N, VL(N)), cost(N, VL(N)),
    found(N), dist(N), pi(N), width(N), dad(N) {}

  void AddEdge(int from, int to, L cap, L cost) {
    this->cap[from][to] = cap;
    this->cost[from][to] = cost;
  }

  void Relax(int s, int k, L cap, L cost, int dir) {
    L val = dist[s] + pi[s] - pi[k] + cost;
    if (cap && val < dist[k]) {
      dist[k] = val;
      dad[k] = make_pair(s, dir);
      width[k] = min(cap, width[s]);
    }
  }

  L Dijkstra(int s, int t) {
    fill(found.begin(), found.end(), false);
    fill(dist.begin(), dist.end(), INF);
    fill(width.begin(), width.end(), 0);
    dist[s] = 0;
    width[s] = INF;

    while (s != -1) {
      int best = -1;
      found[s] = true;
      for (int k = 0; k < N; k++) {
        if (found[k]) continue;
        Relax(s, k, cap[s][k] - flow[s][k], cost[s][k], 1);
        Relax(s, k, flow[k][s], -cost[k][s], -1);
        if (best == -1 || dist[k] < dist[best]) best = k;
      }
      s = best;
    }

    for (int k = 0; k < N; k++)
      pi[k] = min(pi[k] + dist[k], INF);
    return width[t];
  }

  pair<L, L> GetMaxFlow(int s, int t) {
    L totflow = 0, totcost = 0;
    while (L amt = Dijkstra(s, t)) {
      totflow += amt;
      for (int x = t; x != s; x = dad[x].first) {
        if (dad[x].second == 1) {
          flow[dad[x].first][x] += amt;
          totcost += amt * cost[dad[x].first][x];
        } else {
          flow[x][dad[x].first] -= amt;
          totcost -= amt * cost[x][dad[x].first];
        }
      }
    }
    return make_pair(totflow, totcost);
  }
};

// BEGIN CUT
// The following code solves UVA problem #10594: Data Flow

int main() {
  int N, M;

  while (scanf("%d%d", &N, &M) == 2) {
    VVL v(M, VL(3));
    for (int i = 0; i < M; i++)
      scanf("%lld%lld%lld", &v[i][0], &v[i][1], &v[i][2]);
    L D, K;
    scanf("%lld%lld", &D, &K);

    MinCostMaxFlow mcmf(N+1);

    for (int i = 0; i < M; i++) {
      mcmf.AddEdge(int(v[i][0]), int(v[i][1]), K, v[i][2]);
      mcmf.AddEdge(int(v[i][1]), int(v[i][0]), K, v[i][2]);
    }
    mcmf.AddEdge(0, 1, D, 0);

    pair<L, L> res = mcmf.GetMaxFlow(0, N);

    if (res.first == D) {
      printf("%lld\n", res.second);
    } else {
      printf("Impossible.\n");
    }
  }

  return 0;
}

// END CUT


1.3 Push-relabel max-flow

// Adjacency list implementation of FIFO push relabel maximum flow
// with the gap relabeling heuristic.  This implementation is
// significantly faster than straight Ford-Fulkerson.  It solves
// random problems with 10000 vertices and 1000000 edges in a few
// seconds, though it is possible to construct test cases that
// achieve the worst-case.
//
// Running time:
//     O(|V|^3)
//
// INPUT:
//     - graph, constructed using AddEdge()
//     - source
//     - sink
//
// OUTPUT:
//     - maximum flow value
//     - To obtain the actual flow values, look at all edges with
//       capacity > 0 (zero capacity edges are residual edges).

#include <cmath>
#include <vector>
#include <iostream>
#include <queue>

using namespace std;

typedef long long LL;
int main() {
struct Edge { int n, m;
int from, to, cap, flow, index; scanf("%d%d", &n, &m);
Edge(int from, int to, int cap, int flow, int index) :
from(from), to(to), cap(cap), flow(flow), index(index) {} PushRelabel pr(n);
}; for (int i = 0; i < m; i++) {
int a, b, c;
struct PushRelabel { scanf("%d%d%d", &a, &b, &c);
int N; if (a == b) continue;
vector<vector<Edge> > G; pr.AddEdge(a-1, b-1, c);
vector<LL> excess; pr.AddEdge(b-1, a-1, c);
vector<int> dist, active, count; }
queue<int> Q; printf("%Ld\n", pr.GetMaxFlow(0, n-1));
return 0;
PushRelabel(int N) : N(N), G(N), excess(N), dist(N), active(N), count(2*N) {} }

void AddEdge(int from, int to, int cap) { // END CUT


G[from].push_back(Edge(from, to, cap, 0, G[to].size()));
if (from == to) G[from].back().index++;
G[to].push_back(Edge(to, from, 0, 0, G[from].size() - 1));
}

void Enqueue(int v) { 1.4 Min-cost matching


if (!active[v] && excess[v] > 0) { active[v] = true; Q.push(v); }
}
//////////////////////////////////////////////////////////////////////
void Push(Edge &e) { // Min cost bipartite matching via shortest augmenting paths
int amt = int(min(excess[e.from], LL(e.cap - e.flow))); //
if (dist[e.from] <= dist[e.to] || amt == 0) return; // This is an O(nˆ3) implementation of a shortest augmenting path
e.flow += amt; // algorithm for finding min cost perfect matchings in dense
G[e.to][e.index].flow -= amt; // graphs. In practice, it solves 1000x1000 problems in around 1
excess[e.to] += amt; // second.
excess[e.from] -= amt; //
Enqueue(e.to); // cost[i][j] = cost for pairing left node i with right node j
} // Lmate[i] = index of right node that left node i pairs with
// Rmate[j] = index of left node that right node j pairs with
void Gap(int k) { //
for (int v = 0; v < N; v++) { // The values in cost[i][j] may be positive or negative. To perform
if (dist[v] < k) continue; // maximization, simply negate the cost[][] matrix.
count[dist[v]]--; //////////////////////////////////////////////////////////////////////
dist[v] = max(dist[v], N+1);
count[dist[v]]++; #include <algorithm>
Enqueue(v); #include <cstdio>
} #include <cmath>
} #include <vector>

void Relabel(int v) { using namespace std;


count[dist[v]]--;
dist[v] = 2*N; typedef vector<double> VD;
for (int i = 0; i < G[v].size(); i++) typedef vector<VD> VVD;
if (G[v][i].cap - G[v][i].flow > 0) typedef vector<int> VI;
dist[v] = min(dist[v], dist[G[v][i].to] + 1);
count[dist[v]]++; double MinCostMatching(const VVD &cost, VI &Lmate, VI &Rmate) {
Enqueue(v); int n = int(cost.size());
}
// construct dual feasible solution
void Discharge(int v) { VD u(n);
for (int i = 0; excess[v] > 0 && i < G[v].size(); i++) Push(G[v][i]); VD v(n);
if (excess[v] > 0) { for (int i = 0; i < n; i++) {
if (count[dist[v]] == 1) u[i] = cost[i][0];
Gap(dist[v]); for (int j = 1; j < n; j++) u[i] = min(u[i], cost[i][j]);
else }
Relabel(v); for (int j = 0; j < n; j++) {
} v[j] = cost[0][j] - u[0];
} for (int i = 1; i < n; i++) v[j] = min(v[j], cost[i][j] - u[i]);
}
LL GetMaxFlow(int s, int t) {
count[0] = N-1; // construct primal solution satisfying complementary slackness
count[N] = 1; Lmate = VI(n, -1);
dist[s] = N; Rmate = VI(n, -1);
active[s] = active[t] = true; int mated = 0;
for (int i = 0; i < G[s].size(); i++) { for (int i = 0; i < n; i++) {
excess[s] += G[s][i].cap; for (int j = 0; j < n; j++) {
Push(G[s][i]); if (Rmate[j] != -1) continue;
} if (fabs(cost[i][j] - u[i] - v[j]) < 1e-10) {
Lmate[i] = j;
while (!Q.empty()) { Rmate[j] = i;
int v = Q.front(); mated++;
Q.pop(); break;
active[v] = false; }
Discharge(v); }
} }

LL totflow = 0; VD dist(n);
for (int i = 0; i < G[s].size(); i++) totflow += G[s][i].flow; VI dad(n);
return totflow; VI seen(n);
}
}; // repeat until primal solution is feasible
while (mated < n) {
// BEGIN CUT
// The following code solves SPOJ problem #4110: Fast Maximum Flow (FASTFLOW) // find an unmatched left node

int s = 0;
while (Lmate[s] != -1) s++; mc[j] = i;
return true;
// initialize Dijkstra }
fill(dad.begin(), dad.end(), -1); }
fill(seen.begin(), seen.end(), 0); }
for (int k = 0; k < n; k++) return false;
dist[k] = cost[s][k] - u[s] - v[k]; }

int j = 0; int BipartiteMatching(const VVI &w, VI &mr, VI &mc) {


while (true) { mr = VI(w.size(), -1);
mc = VI(w[0].size(), -1);
// find closest
j = -1; int ct = 0;
for (int k = 0; k < n; k++) { for (int i = 0; i < w.size(); i++) {
if (seen[k]) continue; VI seen(w[0].size());
if (j == -1 || dist[k] < dist[j]) j = k; if (FindMatch(i, w, mr, mc, seen)) ct++;
} }
seen[j] = 1; return ct;
}
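A minimal usage sketch for the max bipartite matching routine of section 1.5, not part of the original notebook: it assumes FindMatch/BipartiteMatching and the VI/VVI typedefs of that section are in scope, and the edge set and helper name are illustrative only.

// Left nodes {0,1,2}, right nodes {0,1,2}; w[i][j] = 1 iff i and j may be paired.
// Edges: 0-{0,1}, 1-{0}, 2-{2}.  The maximum matching pairs 0-1, 1-0, 2-2, size 3.
int demo_bipartite_matching() {
  VVI w(3, VI(3, 0));
  w[0][0] = w[0][1] = 1;
  w[1][0] = 1;
  w[2][2] = 1;
  VI mr, mc;
  return BipartiteMatching(w, mr, mc);  // returns 3; mr/mc hold the assignment
}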
// termination condition
if (Rmate[j] == -1) break;

// relax neighbors
const int i = Rmate[j];
for (int k = 0; k < n; k++) {
1.6 Global min-cut
if (seen[k]) continue;
const double new_dist = dist[j] + cost[i][k] - u[i] - v[k]; // Adjacency matrix implementation of Stoer-Wagner min cut algorithm.
if (dist[k] > new_dist) { //
dist[k] = new_dist; // Running time:
dad[k] = j; // O(|V|ˆ3)
} //
} // INPUT:
} // - graph, constructed using AddEdge()
//
// update dual variables // OUTPUT:
for (int k = 0; k < n; k++) { // - (min cut value, nodes in half of min cut)
if (k == j || !seen[k]) continue;
const int i = Rmate[k]; #include <cmath>
v[k] += dist[k] - dist[j]; #include <vector>
u[i] -= dist[k] - dist[j]; #include <iostream>
}
u[s] += dist[j]; using namespace std;

// augment along path typedef vector<int> VI;


while (dad[j] >= 0) { typedef vector<VI> VVI;
const int d = dad[j];
Rmate[j] = Rmate[d]; const int INF = 1000000000;
Lmate[Rmate[j]] = j;
j = d; pair<int, VI> GetMinCut(VVI &weights) {
} int N = weights.size();
Rmate[j] = s; VI used(N), cut, best_cut;
Lmate[s] = j; int best_weight = -1;

mated++; for (int phase = N-1; phase >= 0; phase--) {


} VI w = weights[0];
VI added = used;
double value = 0; int prev, last = 0;
for (int i = 0; i < n; i++) for (int i = 0; i < phase; i++) {
value += cost[i][Lmate[i]]; prev = last;
last = -1;
return value; for (int j = 1; j < N; j++)
} if (!added[j] && (last == -1 || w[j] > w[last])) last = j;
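A minimal usage sketch for MinCostMatching (section 1.4), not part of the original notebook: it assumes that routine and the VVD/VD/VI typedefs of its section are in scope, and the cost values and helper name are illustrative only.

// 2x2 cost matrix: pairing left 0 with right 1 (cost 1) and left 1 with right 0
// (cost 2) is the cheapest perfect matching, total cost 3.
double demo_min_cost_matching() {
  VVD cost(2, VD(2));
  cost[0][0] = 4; cost[0][1] = 1;
  cost[1][0] = 2; cost[1][1] = 6;
  VI Lmate, Rmate;
  return MinCostMatching(cost, Lmate, Rmate);  // returns 3
}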
if (i == phase-1) {
for (int j = 0; j < N; j++) weights[prev][j] += weights[last][j];
for (int j = 0; j < N; j++) weights[j][prev] = weights[prev][j];
used[last] = true;
1.5 Max bipartite matching cut.push_back(last);
if (best_weight == -1 || w[last] < best_weight) {
best_cut = cut;
best_weight = w[last];
// This code performs maximum bipartite matching. }
// } else {
// Running time: O(|E| |V|) -- often much faster in practice for (int j = 0; j < N; j++)
// w[j] += weights[last][j];
// INPUT: w[i][j] = edge between row node i and column node j added[last] = true;
// OUTPUT: mr[i] = assignment for row node i, -1 if unassigned }
// mc[j] = assignment for column node j, -1 if unassigned }
// function returns number of matches made }
return make_pair(best_weight, best_cut);
#include <vector> }

using namespace std; // BEGIN CUT


// The following code solves UVA problem #10989: Bomb, Divide and Conquer
typedef vector<int> VI; int main() {
typedef vector<VI> VVI; int N;
cin >> N;
bool FindMatch(int i, const VVI &w, VI &mr, VI &mc, VI &seen) { for (int i = 0; i < N; i++) {
for (int j = 0; j < w[i].size(); j++) { int n, m;
if (w[i][j] && !seen[j]) { cin >> n >> m;
seen[j] = true; VVI weights(n, VI(n));
if (mc[j] < 0 || FindMatch(mc[j], w, mr, mc, seen)) { for (int j = 0; j < m; j++) {

mr[i] = j; int a, b, c;
cin >> a >> b >> c; int M = phi.size();
weights[a-1][b-1] = weights[b-1][a-1] = c; cap = VVI(M+2, VI(M+2));
} VI b(M);
pair<int, VI> res = GetMinCut(weights); int c = 0;
cout << "Case #" << i+1 << ": " << res.first << endl;
} for (int i = 0; i < M; i++) {
} b[i] += psi[i][1] - psi[i][0];
// END CUT c += psi[i][0];
for (int j = 0; j < i; j++)
b[i] += phi[i][j][1][1] - phi[i][j][0][1];
for (int j = i+1; j < M; j++) {
cap[i][j] = phi[i][j][0][1] + phi[i][j][1][0] - phi[i][j][0][0] - phi[i][j][1][1];
1.7 Graph cut inference b[i] += phi[i][j][1][0] - phi[i][j][0][0];
c += phi[i][j][0][0];
}
}
// Special-purpose {0,1} combinatorial optimization solver for
// problems of the following by a reduction to graph cuts: #ifdef MAXIMIZATION
// for (int i = 0; i < M; i++) {
// minimize sum_i psi_i(x[i]) for (int j = i+1; j < M; j++)
// x[1]...x[n] in {0,1} + sum_{i < j} phi_{ij}(x[i], x[j]) cap[i][j] *= -1;
// b[i] *= -1;
// where }
// psi_i : {0, 1} --> R c *= -1;
// phi_{ij} : {0, 1} x {0, 1} --> R #endif
//
// such that for (int i = 0; i < M; i++) {
// phi_{ij}(0,0) + phi_{ij}(1,1) <= phi_{ij}(0,1) + phi_{ij}(1,0) (*) if (b[i] >= 0) {
// cap[M][i] = b[i];
// This can also be used to solve maximization problems where the } else {
// direction of the inequality in (*) is reversed. cap[i][M+1] = -b[i];
// c += b[i];
// INPUT: phi -- a matrix such that phi[i][j][u][v] = phi_{ij}(u, v) }
// psi -- a matrix such that psi[i][u] = psi_i(u) }
// x -- a vector where the optimal solution will be stored
// int score = GetMaxFlow(M, M+1);
// OUTPUT: value of the optimal solution fill(reached.begin(), reached.end(), 0);
// Augment(M, M+1, INF);
// To use this code, create a GraphCutInference object, and call the x = VI(M);
// DoInference() method. To perform maximization instead of minimization, for (int i = 0; i < M; i++) x[i] = reached[i] ? 0 : 1;
// ensure that #define MAXIMIZATION is enabled. score += c;
#ifdef MAXIMIZATION
#include <vector> score *= -1;
#include <iostream> #endif

using namespace std; return score;


}
typedef vector<int> VI;
typedef vector<VI> VVI; };
typedef vector<VVI> VVVI;
typedef vector<VVVI> VVVVI; int main() {

const int INF = 1000000000; // solver for "Cat vs. Dog" from NWERC 2008

// comment out following line for minimization int numcases;


#define MAXIMIZATION cin >> numcases;
for (int caseno = 0; caseno < numcases; caseno++) {
struct GraphCutInference { int c, d, v;
int N; cin >> c >> d >> v;
VVI cap, flow;
VI reached; VVVVI phi(c+d, VVVI(c+d, VVI(2, VI(2))));
VVI psi(c+d, VI(2));
int Augment(int s, int t, int a) { for (int i = 0; i < v; i++) {
reached[s] = 1; char p, q;
if (s == t) return a; int u, v;
for (int k = 0; k < N; k++) { cin >> p >> u >> q >> v;
if (reached[k]) continue; u--; v--;
if (int aa = min(a, cap[s][k] - flow[s][k])) { if (p == ’C’) {
if (int b = Augment(k, t, aa)) { phi[u][c+v][0][0]++;
flow[s][k] += b; phi[c+v][u][0][0]++;
flow[k][s] -= b; } else {
return b; phi[v][c+u][1][1]++;
} phi[c+u][v][1][1]++;
} }
} }
return 0;
} GraphCutInference graph;
VI x;
int GetMaxFlow(int s, int t) { cout << graph.DoInference(phi, psi, x) << endl;
N = cap.size(); }
flow = VVI(N, VI(N));
reached = VI(N); return 0;
}
int totflow = 0;
while (int amt = Augment(s, t, INF)) {
totflow += amt;
fill(reached.begin(), reached.end(), 0);
}
return totflow;
}

int DoInference(const VVVVI &phi, const VVI &psi, VI &x) {
for (int i = 0; i < n; i++) scanf("%lf%lf", &v[i].x, &v[i].y);
vector<PT> h(v);

2 Geometry map<PT,int> index;


for (int i = n-1; i >= 0; i--) index[v[i]] = i+1;
ConvexHull(h);

double len = 0;
2.1 Convex hull for (int i = 0; i < h.size(); i++) {
double dx = h[i].x - h[(i+1)%h.size()].x;
double dy = h[i].y - h[(i+1)%h.size()].y;
// Compute the 2D convex hull of a set of points using the monotone chain len += sqrt(dx*dx+dy*dy);
// algorithm. Eliminate redundant points from the hull if REMOVE_REDUNDANT is }
// #defined.
// if (caseno > 0) printf("\n");
// Running time: O(n log n) printf("%.2f\n", len);
// for (int i = 0; i < h.size(); i++) {
// INPUT: a vector of input points, unordered. if (i > 0) printf(" ");
// OUTPUT: a vector of points in the convex hull, counterclockwise, starting printf("%d", index[h[i]]);
// with bottommost/leftmost point }
printf("\n");
#include <cstdio> }
#include <cassert> }
#include <vector>
#include <algorithm> // END CUT
#include <cmath>
// BEGIN CUT
#include <map>
// END CUT

using namespace std;


2.2 Miscellaneous geometry
#define REMOVE_REDUNDANT
// C++ routines for computational geometry.
typedef double T;
const T EPS = 1e-7; #include <iostream>
struct PT { #include <vector>
T x, y; #include <cmath>
PT() {} #include <cassert>
PT(T x, T y) : x(x), y(y) {}
bool operator<(const PT &rhs) const { return make_pair(y,x) < make_pair(rhs.y,rhs.x); } using namespace std;
bool operator==(const PT &rhs) const { return make_pair(y,x) == make_pair(rhs.y,rhs.x); }
}; double INF = 1e100;
double EPS = 1e-12;
T cross(PT p, PT q) { return p.x*q.y-p.y*q.x; }
T area2(PT a, PT b, PT c) { return cross(a,b) + cross(b,c) + cross(c,a); } struct PT {
double x, y;
#ifdef REMOVE_REDUNDANT PT() {}
bool between(const PT &a, const PT &b, const PT &c) { PT(double x, double y) : x(x), y(y) {}
return (fabs(area2(a,b,c)) < EPS && (a.x-b.x)*(c.x-b.x) <= 0 && (a.y-b.y)*(c.y-b.y) <= 0); PT(const PT &p) : x(p.x), y(p.y) {}
} PT operator + (const PT &p) const { return PT(x+p.x, y+p.y); }
#endif PT operator - (const PT &p) const { return PT(x-p.x, y-p.y); }
PT operator * (double c) const { return PT(x*c, y*c ); }
void ConvexHull(vector<PT> &pts) { PT operator / (double c) const { return PT(x/c, y/c ); }
sort(pts.begin(), pts.end()); };
pts.erase(unique(pts.begin(), pts.end()), pts.end());
vector<PT> up, dn; double dot(PT p, PT q) { return p.x*q.x+p.y*q.y; }
for (int i = 0; i < pts.size(); i++) { double dist2(PT p, PT q) { return dot(p-q,p-q); }
while (up.size() > 1 && area2(up[up.size()-2], up.back(), pts[i]) >= 0) up.pop_back(); double cross(PT p, PT q) { return p.x*q.y-p.y*q.x; }
while (dn.size() > 1 && area2(dn[dn.size()-2], dn.back(), pts[i]) <= 0) dn.pop_back(); ostream &operator<<(ostream &os, const PT &p) {
up.push_back(pts[i]); os << "(" << p.x << "," << p.y << ")";
dn.push_back(pts[i]); }
}
pts = dn; // rotate a point CCW or CW around the origin
for (int i = (int) up.size() - 2; i >= 1; i--) pts.push_back(up[i]); PT RotateCCW90(PT p) { return PT(-p.y,p.x); }
PT RotateCW90(PT p) { return PT(p.y,-p.x); }
#ifdef REMOVE_REDUNDANT PT RotateCCW(PT p, double t) {
if (pts.size() <= 2) return; return PT(p.x*cos(t)-p.y*sin(t), p.x*sin(t)+p.y*cos(t));
dn.clear(); }
dn.push_back(pts[0]);
dn.push_back(pts[1]); // project point c onto line through a and b
for (int i = 2; i < pts.size(); i++) { // assuming a != b
if (between(dn[dn.size()-2], dn[dn.size()-1], pts[i])) dn.pop_back(); PT ProjectPointLine(PT a, PT b, PT c) {
dn.push_back(pts[i]); return a + (b-a)*dot(c-a, b-a)/dot(b-a, b-a);
} }
if (dn.size() >= 3 && between(dn.back(), dn[0], dn[1])) {
dn[0] = dn.back(); // project point c onto line segment through a and b
dn.pop_back(); PT ProjectPointSegment(PT a, PT b, PT c) {
} double r = dot(b-a,b-a);
pts = dn; if (fabs(r) < EPS) return a;
#endif r = dot(c-a, b-a)/r;
} if (r < 0) return a;
if (r > 1) return b;
// BEGIN CUT return a + (b-a)*r;
// The following code solves SPOJ problem #26: Build the Fence (BSHEEP) }

int main() { // compute distance from c to segment between a and b


int t; double DistancePointSegment(PT a, PT b, PT c) {
scanf("%d", &t); return sqrt(dist2(c, ProjectPointSegment(a, b, c)));
for (int caseno = 0; caseno < t; caseno++) { }
int n;
scanf("%d", &n); // compute distance between point (x,y,z) and plane ax+by+cz=d

vector<PT> v(n); double DistancePointPlane(double x, double y, double z,
double a, double b, double c, double d) // compute intersection of circle centered at a with radius r
{ // with circle centered at b with radius R
return fabs(a*x+b*y+c*z-d)/sqrt(a*a+b*b+c*c); vector<PT> CircleCircleIntersection(PT a, PT b, double r, double R) {
} vector<PT> ret;
double d = sqrt(dist2(a, b));
// determine if lines from a to b and c to d are parallel or collinear if (d > r+R || d+min(r, R) < max(r, R)) return ret;
bool LinesParallel(PT a, PT b, PT c, PT d) { double x = (d*d-R*R+r*r)/(2*d);
return fabs(cross(b-a, c-d)) < EPS; double y = sqrt(r*r-x*x);
} PT v = (b-a)/d;
ret.push_back(a+v*x + RotateCCW90(v)*y);
bool LinesCollinear(PT a, PT b, PT c, PT d) { if (y > 0)
return LinesParallel(a, b, c, d) ret.push_back(a+v*x - RotateCCW90(v)*y);
&& fabs(cross(a-b, a-c)) < EPS return ret;
&& fabs(cross(c-d, c-a)) < EPS; }
}
// This code computes the area or centroid of a (possibly nonconvex)
// determine if line segment from a to b intersects with // polygon, assuming that the coordinates are listed in a clockwise or
// line segment from c to d // counterclockwise fashion. Note that the centroid is often known as
bool SegmentsIntersect(PT a, PT b, PT c, PT d) { // the "center of gravity" or "center of mass".
if (LinesCollinear(a, b, c, d)) { double ComputeSignedArea(const vector<PT> &p) {
if (dist2(a, c) < EPS || dist2(a, d) < EPS || double area = 0;
dist2(b, c) < EPS || dist2(b, d) < EPS) return true; for(int i = 0; i < p.size(); i++) {
if (dot(c-a, c-b) > 0 && dot(d-a, d-b) > 0 && dot(c-b, d-b) > 0) int j = (i+1) % p.size();
return false; area += p[i].x*p[j].y - p[j].x*p[i].y;
return true; }
} return area / 2.0;
if (cross(d-a, b-a) * cross(c-a, b-a) > 0) return false; }
if (cross(a-c, d-c) * cross(b-c, d-c) > 0) return false;
return true; double ComputeArea(const vector<PT> &p) {
} return fabs(ComputeSignedArea(p));
}
// compute intersection of line passing through a and b
// with line passing through c and d, assuming that unique PT ComputeCentroid(const vector<PT> &p) {
// intersection exists; for segment intersection, check if PT c(0,0);
// segments intersect first double scale = 6.0 * ComputeSignedArea(p);
PT ComputeLineIntersection(PT a, PT b, PT c, PT d) { for (int i = 0; i < p.size(); i++){
b=b-a; d=c-d; c=c-a; int j = (i+1) % p.size();
assert(dot(b, b) > EPS && dot(d, d) > EPS); c = c + (p[i]+p[j])*(p[i].x*p[j].y - p[j].x*p[i].y);
return a + b*cross(c, d)/cross(b, d); }
} return c / scale;
}
// compute center of circle given three points
PT ComputeCircleCenter(PT a, PT b, PT c) { // tests whether or not a given polygon (in CW or CCW order) is simple
b=(a+b)/2; bool IsSimple(const vector<PT> &p) {
c=(a+c)/2; for (int i = 0; i < p.size(); i++) {
return ComputeLineIntersection(b, b+RotateCW90(a-b), c, c+RotateCW90(a-c)); for (int k = i+1; k < p.size(); k++) {
} int j = (i+1) % p.size();
int l = (k+1) % p.size();
// determine if point is in a possibly non-convex polygon (by William if (i == l || j == k) continue;
// Randolph Franklin); returns 1 for strictly interior points, 0 for if (SegmentsIntersect(p[i], p[j], p[k], p[l]))
// strictly exterior points, and 0 or 1 for the remaining points. return false;
// Note that it is possible to convert this into an *exact* test using }
// integer arithmetic by taking care of the division appropriately }
// (making sure to deal with signs properly) and then by writing exact return true;
// tests for checking point on polygon boundary }
bool PointInPolygon(const vector<PT> &p, PT q) {
bool c = 0; int main() {
for (int i = 0; i < p.size(); i++){
int j = (i+1)%p.size(); // expected: (-5,2)
if ((p[i].y <= q.y && q.y < p[j].y || cerr << RotateCCW90(PT(2,5)) << endl;
p[j].y <= q.y && q.y < p[i].y) &&
q.x < p[i].x + (p[j].x - p[i].x) * (q.y - p[i].y) / (p[j].y - p[i].y)) // expected: (5,-2)
c = !c; cerr << RotateCW90(PT(2,5)) << endl;
}
return c; // expected: (-5,2)
} cerr << RotateCCW(PT(2,5),M_PI/2) << endl;

// determine if point is on the boundary of a polygon // expected: (5,2)


bool PointOnPolygon(const vector<PT> &p, PT q) { cerr << ProjectPointLine(PT(-5,-2), PT(10,4), PT(3,7)) << endl;
for (int i = 0; i < p.size(); i++)
if (dist2(ProjectPointSegment(p[i], p[(i+1)%p.size()], q), q) < EPS) // expected: (5,2) (7.5,3) (2.5,1)
return true; cerr << ProjectPointSegment(PT(-5,-2), PT(10,4), PT(3,7)) << " "
return false; << ProjectPointSegment(PT(7.5,3), PT(10,4), PT(3,7)) << " "
} << ProjectPointSegment(PT(-5,-2), PT(2.5,1), PT(3,7)) << endl;

// compute intersection of line through points a and b with // expected: 6.78903


// circle centered at c with radius r > 0 cerr << DistancePointPlane(4,-4,3,2,-2,5,-8) << endl;
vector<PT> CircleLineIntersection(PT a, PT b, PT c, double r) {
vector<PT> ret; // expected: 1 0 1
b = b-a; cerr << LinesParallel(PT(1,1), PT(3,5), PT(2,1), PT(4,5)) << " "
a = a-c; << LinesParallel(PT(1,1), PT(3,5), PT(2,0), PT(4,5)) << " "
double A = dot(b, b); << LinesParallel(PT(1,1), PT(3,5), PT(5,9), PT(7,13)) << endl;
double B = dot(a, b);
double C = dot(a, a) - r*r; // expected: 0 0 1
double D = B*B - A*C; cerr << LinesCollinear(PT(1,1), PT(3,5), PT(2,1), PT(4,5)) << " "
if (D < -EPS) return ret; << LinesCollinear(PT(1,1), PT(3,5), PT(2,0), PT(4,5)) << " "
ret.push_back(c+a+b*(-B+sqrt(D+EPS))/A); << LinesCollinear(PT(1,1), PT(3,5), PT(5,9), PT(7,13)) << endl;
if (D > EPS)
ret.push_back(c+a+b*(-B-sqrt(D))/A); // expected: 1 1 1 0
return ret; cerr << SegmentsIntersect(PT(0,0), PT(2,4), PT(3,1), PT(-1,3)) << " "
} << SegmentsIntersect(PT(0,0), PT(2,4), PT(4,3), PT(0,5)) << " "

<< SegmentsIntersect(PT(0,0), PT(2,4), PT(2,-1), PT(-2,1)) << " "
<< SegmentsIntersect(PT(0,0), PT(2,4), PT(5,5), PT(1,7)) << endl; public class JavaGeometry {

// expected: (1,2) // make an array of doubles from a string


cerr << ComputeLineIntersection(PT(0,0), PT(2,4), PT(3,1), PT(-1,3)) << endl; static double[] readPoints(String s) {
String[] arr = s.trim().split("\\s++");
// expected: (1,1) double[] ret = new double[arr.length];
cerr << ComputeCircleCenter(PT(-3,4), PT(6,1), PT(4,5)) << endl; for (int i = 0; i < arr.length; i++) ret[i] = Double.parseDouble(arr[i]);
return ret;
vector<PT> v; }
v.push_back(PT(0,0));
v.push_back(PT(5,0)); // make an Area object from the coordinates of a polygon
v.push_back(PT(5,5)); static Area makeArea(double[] pts) {
v.push_back(PT(0,5)); Path2D.Double p = new Path2D.Double();
p.moveTo(pts[0], pts[1]);
// expected: 1 1 1 0 0 for (int i = 2; i < pts.length; i += 2) p.lineTo(pts[i], pts[i+1]);
cerr << PointInPolygon(v, PT(2,2)) << " " p.closePath();
<< PointInPolygon(v, PT(2,0)) << " " return new Area(p);
<< PointInPolygon(v, PT(0,2)) << " " }
<< PointInPolygon(v, PT(5,2)) << " "
<< PointInPolygon(v, PT(2,5)) << endl; // compute area of polygon
static double computePolygonArea(ArrayList<Point2D.Double> points) {
// expected: 0 1 1 1 1 Point2D.Double[] pts = points.toArray(new Point2D.Double[points.size()]);
cerr << PointOnPolygon(v, PT(2,2)) << " " double area = 0;
<< PointOnPolygon(v, PT(2,0)) << " " for (int i = 0; i < pts.length; i++){
<< PointOnPolygon(v, PT(0,2)) << " " int j = (i+1) % pts.length;
<< PointOnPolygon(v, PT(5,2)) << " " area += pts[i].x * pts[j].y - pts[j].x * pts[i].y;
<< PointOnPolygon(v, PT(2,5)) << endl; }
return Math.abs(area)/2;
// expected: (1,6) }
// (5,4) (4,5)
// blank line // compute the area of an Area object containing several disjoint polygons
// (4,5) (5,4) static double computeArea(Area area) {
// blank line double totArea = 0;
// (4,5) (5,4) PathIterator iter = area.getPathIterator(null);
vector<PT> u = CircleLineIntersection(PT(0,6), PT(2,6), PT(1,1), 5); ArrayList<Point2D.Double> points = new ArrayList<Point2D.Double>();
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl;
u = CircleLineIntersection(PT(0,9), PT(9,0), PT(1,1), 5); while (!iter.isDone()) {
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl; double[] buffer = new double[6];
u = CircleCircleIntersection(PT(1,1), PT(10,10), 5, 5); switch (iter.currentSegment(buffer)) {
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl; case PathIterator.SEG_MOVETO:
u = CircleCircleIntersection(PT(1,1), PT(8,8), 5, 5); case PathIterator.SEG_LINETO:
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl; points.add(new Point2D.Double(buffer[0], buffer[1]));
u = CircleCircleIntersection(PT(1,1), PT(4.5,4.5), 10, sqrt(2.0)/2.0); break;
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl; case PathIterator.SEG_CLOSE:
u = CircleCircleIntersection(PT(1,1), PT(4.5,4.5), 5, sqrt(2.0)/2.0); totArea += computePolygonArea(points);
for (int i = 0; i < u.size(); i++) cerr << u[i] << " "; cerr << endl; points.clear();
break;
// area should be 5.0 }
// centroid should be (1.1666666, 1.166666) iter.next();
PT pa[] = { PT(0,0), PT(5,0), PT(1,1), PT(0,5) }; }
vector<PT> p(pa, pa+4); return totArea;
PT c = ComputeCentroid(p); }
cerr << "Area: " << ComputeArea(p) << endl;
cerr << "Centroid: " << c << endl; // notice that the main() throws an Exception -- necessary to
// avoid wrapping the Scanner object for file reading in a
return 0; // try { ... } catch block.
} public static void main(String args[]) throws Exception {

Scanner scanner = new Scanner(new File("input.txt"));


// also,
// Scanner scanner = new Scanner (System.in);
2.3 Java geometry double[] pointsA = readPoints(scanner.nextLine());
double[] pointsB = readPoints(scanner.nextLine());
Area areaA = makeArea(pointsA);
// In this example, we read an input file containing three lines, each Area areaB = makeArea(pointsB);
// containing an even number of doubles, separated by commas. The first two areaB.subtract(areaA);
// lines represent the coordinates of two polygons, given in counterclockwise // also,
// (or clockwise) order, which we will call "A" and "B". The last line // areaB.exclusiveOr (areaA);
// contains a list of points, p[1], p[2], ... // areaB.add (areaA);
// // areaB.intersect (areaA);
// Our goal is to determine:
// (1) whether B - A is a single closed shape (as opposed to multiple shapes) // (1) determine whether B - A is a single closed shape (as
// (2) the area of B - A // opposed to multiple shapes)
// (3) whether each p[i] is in the interior of B - A boolean isSingle = areaB.isSingular();
// // also,
// INPUT: // areaB.isEmpty();
// 0 0 10 0 0 10
// 0 0 10 10 10 0 if (isSingle)
// 8 6 System.out.println("The area is singular.");
// 5 1 else
// System.out.println("The area is not singular.");
// OUTPUT:
// The area is singular. // (2) compute the area of B - A
// The area is 25.0 System.out.println("The area is " + computeArea(areaB) + ".");
// Point belongs to the area.
// Point does not belong to the area. // (3) determine whether each p[i] is in the interior of B - A
while (scanner.hasNextDouble()) {
import java.util.*; double x = scanner.nextDouble();
import java.awt.geom.*; assert(scanner.hasNextDouble());
import java.io.*; double y = scanner.nextDouble();

if (areaB.contains(x,y)) {
System.out.println ("Point belongs to the area.");
} else {
System.out.println ("Point does not belong to the area.");
2.5 Slow Delaunay triangulation
}
}
// Slow but simple Delaunay triangulation. Does not handle
// Finally, some useful things we didn’t use in this example: // degenerate cases (from O’Rourke, Computational Geometry in C)
// //
// Ellipse2D.Double ellipse = new Ellipse2D.Double (double x, double y, // Running time: O(nˆ4)
// double w, double h); //
// // INPUT: x[] = x-coordinates
// creates an ellipse inscribed in box with bottom-left corner (x,y) // y[] = y-coordinates
// and upper-right corner (x+y,w+h) //
// // OUTPUT: triples = a vector containing m triples of indices
// Rectangle2D.Double rect = new Rectangle2D.Double (double x, double y, // corresponding to triangle vertices
// double w, double h);
// #include<vector>
// creates a box with bottom-left corner (x,y) and upper-right using namespace std;
// corner (x+y,w+h)
// typedef double T;
// Each of these can be embedded in an Area object (e.g., new Area (rect)).
struct triple {
} int i, j, k;
} triple() {}
triple(int i, int j, int k) : i(i), j(j), k(k) {}
};

vector<triple> delaunayTriangulation(vector<T>& x, vector<T>& y) {


int n = x.size();
2.4 3D geometry vector<T> z(n);
vector<triple> ret;

for (int i = 0; i < n; i++)


public class Geom3D { z[i] = x[i] * x[i] + y[i] * y[i];
// distance from point (x, y, z) to plane aX + bY + cZ + d = 0
public static double ptPlaneDist(double x, double y, double z, for (int i = 0; i < n-2; i++) {
double a, double b, double c, double d) { for (int j = i+1; j < n; j++) {
return Math.abs(a*x + b*y + c*z + d) / Math.sqrt(a*a + b*b + c*c); for (int k = i+1; k < n; k++) {
} if (j == k) continue;
double xn = (y[j]-y[i])*(z[k]-z[i]) - (y[k]-y[i])*(z[j]-z[i]);
// distance between parallel planes aX + bY + cZ + d1 = 0 and double yn = (x[k]-x[i])*(z[j]-z[i]) - (x[j]-x[i])*(z[k]-z[i]);
// aX + bY + cZ + d2 = 0 double zn = (x[j]-x[i])*(y[k]-y[i]) - (x[k]-x[i])*(y[j]-y[i]);
public static double planePlaneDist(double a, double b, double c, bool flag = zn < 0;
double d1, double d2) { for (int m = 0; flag && m < n; m++)
return Math.abs(d1 - d2) / Math.sqrt(a*a + b*b + c*c); flag = flag && ((x[m]-x[i])*xn +
} (y[m]-y[i])*yn +
(z[m]-z[i])*zn <= 0);
// distance from point (px, py, pz) to line (x1, y1, z1)-(x2, y2, z2) if (flag) ret.push_back(triple(i, j, k));
// (or ray, or segment; in the case of the ray, the endpoint is the }
// first point) }
public static final int LINE = 0; }
public static final int SEGMENT = 1; return ret;
public static final int RAY = 2; }
public static double ptLineDistSq(double x1, double y1, double z1,
double x2, double y2, double z2, double px, double py, double pz, int main()
int type) { {
double pd2 = (x1-x2)*(x1-x2) + (y1-y2)*(y1-y2) + (z1-z2)*(z1-z2); T xs[]={0, 0, 1, 0.9};
T ys[]={0, 1, 0, 0.9};
double x, y, z; vector<T> x(&xs[0], &xs[4]), y(&ys[0], &ys[4]);
if (pd2 == 0) { vector<triple> tri = delaunayTriangulation(x, y);
x = x1;
y = y1; //expected: 0 1 3
z = z1; // 0 3 2
} else {
double u = ((px-x1)*(x2-x1) + (py-y1)*(y2-y1) + (pz-z1)*(z2-z1)) / pd2; int i;
x = x1 + u * (x2 - x1); for(i = 0; i < tri.size(); i++)
y = y1 + u * (y2 - y1); printf("%d %d %d\n", tri[i].i, tri[i].j, tri[i].k);
z = z1 + u * (z2 - z1); return 0;
if (type != LINE && u < 0) { }
x = x1;
y = y1;
z = z1;
}
if (type == SEGMENT && u > 1.0) {
x = x2; 3 Numerical algorithms
y = y2;
z = z2;
}
} 3.1 Number theory (modular, Chinese remainder, linear
}
return (x-px)*(x-px) + (y-py)*(y-py) + (z-pz)*(z-pz); Diophantine)
public static double ptLineDist(double x1, double y1, double z1,
double x2, double y2, double z2, double px, double py, double pz, // This is a collection of useful code for solving problems that
int type) { // involve modular linear equations. Note that all of the
return Math.sqrt(ptLineDistSq(x1, y1, z1, x2, y2, z2, px, py, pz, type)); // algorithms described here work on nonnegative integers.
}
} #include <iostream>
#include <vector>

#include <algorithm>
using namespace std; // computes x and y such that ax + by = c
// returns whether the solution exists
typedef vector<int> VI; bool linear_diophantine(int a, int b, int c, int &x, int &y) {
typedef pair<int, int> PII; if (!a && !b)
{
// return a % b (positive value) if (c) return false;
int mod(int a, int b) { x = 0; y = 0;
return ((a%b) + b) % b; return true;
} }
if (!a)
// computes gcd(a,b) {
int gcd(int a, int b) { if (c % b) return false;
while (b) { int t = a%b; a = b; b = t; } x = 0; y = c / b;
return a; return true;
} }
if (!b)
// computes lcm(a,b) {
int lcm(int a, int b) { if (c % a) return false;
return a / gcd(a, b)*b; x = c / a; y = 0;
} return true;
}
// (aˆb) mod m via successive squaring int g = gcd(a, b);
int powermod(int a, int b, int m) if (c % g) return false;
{ x = c / g * mod_inverse(a / g, b / g);
int ret = 1; y = (c - a*x) / b;
while (b) return true;
{ }
if (b & 1) ret = mod(ret*a, m);
a = mod(a*a, m); int main() {
b >>= 1; // expected: 2
} cout << gcd(14, 30) << endl;
return ret;
} // expected: 2 -2 1
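A one-line worked example for powermod, which the test main in this section does not exercise; it is a sketch added here, assuming powermod above is in scope, and the helper name is illustrative only.

// 2^10 = 1024 and 1024 mod 1000 = 24, so this prints 24.
void demo_powermod() {
  cout << powermod(2, 10, 1000) << endl;
}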
int x, y;
// returns g = gcd(a, b); finds x, y such that d = ax + by int g = extended_euclid(14, 30, x, y);
int extended_euclid(int a, int b, int &x, int &y) { cout << g << " " << x << " " << y << endl;
int xx = y = 0;
int yy = x = 1; // expected: 95 451
while (b) { VI sols = modular_linear_equation_solver(14, 30, 100);
int q = a / b; for (int i = 0; i < sols.size(); i++) cout << sols[i] << " ";
int t = b; b = a%b; a = t; cout << endl;
t = xx; xx = x - q*xx; x = t;
t = yy; yy = y - q*yy; y = t; // expected: 8
} cout << mod_inverse(8, 9) << endl;
return a;
} // expected: 23 105
// 11 12
// finds all solutions to ax = b (mod n) PII ret = chinese_remainder_theorem(VI({ 3, 5, 7 }), VI({ 2, 3, 2 }));
VI modular_linear_equation_solver(int a, int b, int n) { cout << ret.first << " " << ret.second << endl;
int x, y; ret = chinese_remainder_theorem(VI({ 4, 6 }), VI({ 3, 5 }));
VI ret; cout << ret.first << " " << ret.second << endl;
int g = extended_euclid(a, n, x, y);
if (!(b%g)) { // expected: 5 -15
x = mod(x*(b / g), n); if (!linear_diophantine(7, 2, 5, x, y)) cout << "ERROR" << endl;
for (int i = 0; i < g; i++) cout << x << " " << y << endl;
ret.push_back(mod(x + i*(n / g), n)); return 0;
} }
return ret;
}

// computes b such that ab = 1 (mod n), returns -1 on failure


int mod_inverse(int a, int n) {
int x, y;
3.2 Systems of linear equations, matrix inverse, determi-
int g = extended_euclid(a, n, x, y);
if (g > 1) return -1; nant
return mod(x, n);
}
// Gauss-Jordan elimination with full pivoting.
// Chinese remainder theorem (special case): find z such that //
// z % m1 = r1, z % m2 = r2. Here, z is unique modulo M = lcm(m1, m2). // Uses:
// Return (z, M). On failure, M = -1. // (1) solving systems of linear equations (AX=B)
PII chinese_remainder_theorem(int m1, int r1, int m2, int r2) { // (2) inverting matrices (AX=I)
int s, t; // (3) computing determinants of square matrices
int g = extended_euclid(m1, m2, s, t); //
if (r1%g != r2%g) return make_pair(0, -1); // Running time: O(nˆ3)
return make_pair(mod(s*r2*m1 + t*r1*m2, m1*m2) / g, m1*m2 / g); //
} // INPUT: a[][] = an nxn matrix
// b[][] = an nxm matrix
// Chinese remainder theorem: find z such that //
// z % m[i] = r[i] for all i. Note that the solution is // OUTPUT: X = an nxm matrix (stored in b[][])
// unique modulo M = lcm_i (m[i]). Return (z, M). On // Aˆ{-1} = an nxn matrix (stored in a[][])
// failure, M = -1. Note that we do not require the a[i]’s // returns determinant of a[][]
// to be relatively prime.
PII chinese_remainder_theorem(const VI &m, const VI &r) { #include <iostream>
PII ret = make_pair(r[0], m[0]); #include <vector>
for (int i = 1; i < m.size(); i++) { #include <cmath>
ret = chinese_remainder_theorem(ret.second, ret.first, m[i], r[i]);
if (ret.second == -1) break; using namespace std;
}

return ret; const double EPS = 1e-10;
}
typedef vector<int> VI; // the rank of a matrix.
typedef double T; //
typedef vector<T> VT; // Running time: O(nˆ3)
typedef vector<VT> VVT; //
// INPUT: a[][] = an nxm matrix
T GaussJordan(VVT &a, VVT &b) { //
const int n = a.size(); // OUTPUT: rref[][] = an nxm matrix (stored in a[][])
const int m = b[0].size(); // returns rank of a[][]
VI irow(n), icol(n), ipiv(n);
T det = 1; #include <iostream>
#include <vector>
for (int i = 0; i < n; i++) { #include <cmath>
int pj = -1, pk = -1;
for (int j = 0; j < n; j++) if (!ipiv[j]) using namespace std;
for (int k = 0; k < n; k++) if (!ipiv[k])
if (pj == -1 || fabs(a[j][k]) > fabs(a[pj][pk])) { pj = j; pk = k; } const double EPSILON = 1e-10;
if (fabs(a[pj][pk]) < EPS) { cerr << "Matrix is singular." << endl; exit(0); }
ipiv[pk]++; typedef double T;
swap(a[pj], a[pk]); typedef vector<T> VT;
swap(b[pj], b[pk]); typedef vector<VT> VVT;
if (pj != pk) det *= -1;
irow[i] = pj; int rref(VVT &a) {
icol[i] = pk; int n = a.size();
int m = a[0].size();
T c = 1.0 / a[pk][pk]; int r = 0;
det *= a[pk][pk]; for (int c = 0; c < m && r < n; c++) {
a[pk][pk] = 1.0; int j = r;
for (int p = 0; p < n; p++) a[pk][p] *= c; for (int i = r + 1; i < n; i++)
for (int p = 0; p < m; p++) b[pk][p] *= c; if (fabs(a[i][c]) > fabs(a[j][c])) j = i;
for (int p = 0; p < n; p++) if (p != pk) { if (fabs(a[j][c]) < EPSILON) continue;
c = a[p][pk]; swap(a[j], a[r]);
a[p][pk] = 0;
for (int q = 0; q < n; q++) a[p][q] -= a[pk][q] * c; T s = 1.0 / a[r][c];
for (int q = 0; q < m; q++) b[p][q] -= b[pk][q] * c; for (int j = 0; j < m; j++) a[r][j] *= s;
} for (int i = 0; i < n; i++) if (i != r) {
} T t = a[i][c];
for (int j = 0; j < m; j++) a[i][j] -= t * a[r][j];
for (int p = n-1; p >= 0; p--) if (irow[p] != icol[p]) { }
for (int k = 0; k < n; k++) swap(a[k][irow[p]], a[k][icol[p]]); r++;
} }
return r;
return det; }
}
int main() {
int main() { const int n = 5, m = 4;
const int n = 4; double A[n][m] = {
const int m = 2; {16, 2, 3, 13},
double A[n][n] = { {1,2,3,4},{1,0,1,0},{5,3,2,4},{6,1,4,6} }; { 5, 11, 10, 8},
double B[n][m] = { {1,2},{4,3},{5,6},{8,7} }; { 9, 7, 6, 12},
VVT a(n), b(n); { 4, 14, 15, 1},
for (int i = 0; i < n; i++) { {13, 21, 21, 13}};
a[i] = VT(A[i], A[i] + n); VVT a(n);
b[i] = VT(B[i], B[i] + m); for (int i = 0; i < n; i++)
} a[i] = VT(A[i], A[i] + m);

double det = GaussJordan(a, b); int rank = rref(a);

// expected: 60 // expected: 3
cout << "Determinant: " << det << endl; cout << "Rank: " << rank << endl;

// expected: -0.233333 0.166667 0.133333 0.0666667 // expected: 1 0 0 1


// 0.166667 0.166667 0.333333 -0.333333 // 0 1 0 3
// 0.233333 0.833333 -0.133333 -0.0666667 // 0 0 1 -3
// 0.05 -0.75 -0.1 0.2 // 0 0 0 3.10862e-15
cout << "Inverse: " << endl; // 0 0 0 2.22045e-15
for (int i = 0; i < n; i++) { cout << "rref: " << endl;
for (int j = 0; j < n; j++) for (int i = 0; i < 5; i++) {
cout << a[i][j] << ’ ’; for (int j = 0; j < 4; j++)
cout << endl; cout << a[i][j] << ’ ’;
} cout << endl;
}
// expected: 1.63333 1.3 }
// -0.166667 0.5
// 2.36667 1.7
// -1.85 -1.35
cout << "Solution: " << endl;
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++)
3.4 Fast Fourier transform
cout << b[i][j] << ’ ’;
cout << endl; #include <cassert>
} #include <cstdio>
} #include <cmath>

struct cpx
{
cpx(){}
3.3 Reduced row echelon form, matrix rank cpx(double aa):a(aa),b(0){}
cpx(double aa, double bb):a(aa),b(bb){}
double a;
double b;

// Reduced row echelon form via Gauss-Jordan elimination double modsq(void) const
// with partial pivoting. This can be used for computing {
return a * a + b * b; printf("%7.2lf%7.2lf", Ai.a, Ai.b);
} }
cpx bar(void) const printf("\n");
{
return cpx(a, -b); cpx AB[8];
} for(int i = 0 ; i < 8 ; i++)
}; AB[i] = A[i] * B[i];
cpx aconvb[8];
cpx operator +(cpx a, cpx b) FFT(AB, aconvb, 1, 8, -1);
{ for(int i = 0 ; i < 8 ; i++)
return cpx(a.a + b.a, a.b + b.b); aconvb[i] = aconvb[i] / 8;
} for(int i = 0 ; i < 8 ; i++)
{
cpx operator *(cpx a, cpx b) printf("%7.2lf%7.2lf", aconvb[i].a, aconvb[i].b);
{ }
return cpx(a.a * b.a - a.b * b.b, a.a * b.b + a.b * b.a); printf("\n");
} for(int i = 0 ; i < 8 ; i++)
{
cpx operator /(cpx a, cpx b) cpx aconvbi(0,0);
{ for(int j = 0 ; j < 8 ; j++)
cpx r = a * b.bar(); {
return cpx(r.a / b.modsq(), r.b / b.modsq()); aconvbi = aconvbi + a[j] * b[(8 + i - j) % 8];
} }
printf("%7.2lf%7.2lf", aconvbi.a, aconvbi.b);
cpx EXP(double theta) }
{ printf("\n");
return cpx(cos(theta),sin(theta));
} return 0;
}
const double two_pi = 4 * acos(0);

// in: input array


// out: output array
// step:
// size:
{SET TO 1} (used internally)
length of the input/output {MUST BE A POWER OF 2} 3.5 Simplex algorithm
// dir: either plus or minus one (direction of the FFT)
// RESULT: out[k] = \sum_{j=0}ˆ{size - 1} in[j] * exp(dir * 2pi * i * j * k / size)
void FFT(cpx *in, cpx *out, int step, int size, int dir) // Two-phase simplex algorithm for solving linear programs of the form
{ //
if(size < 1) return; // maximize cˆT x
if(size == 1) // subject to Ax <= b
{ // x >= 0
out[0] = in[0]; //
return; // INPUT: A -- an m x n matrix
} // b -- an m-dimensional vector
FFT(in, out, step * 2, size / 2, dir); // c -- an n-dimensional vector
FFT(in + step, out + size / 2, step * 2, size / 2, dir); // x -- a vector where the optimal solution will be stored
for(int i = 0 ; i < size / 2 ; i++) //
{ // OUTPUT: value of the optimal solution (infinity if unbounded
cpx even = out[i]; // above, nan if infeasible)
cpx odd = out[i + size / 2]; //
out[i] = even + EXP(dir * two_pi * i / size) * odd; // To use this code, create an LPSolver object with A, b, and c as
out[i + size / 2] = even + EXP(dir * two_pi * (i + size / 2) / size) * odd; // arguments. Then, call Solve(x).
}
} #include <iostream>
#include <iomanip>
// Usage: #include <vector>
// f[0...N-1] and g[0..N-1] are numbers #include <cmath>
// Want to compute the convolution h, defined by #include <limits>
// h[n] = sum of f[k]g[n-k] (k = 0, ..., N-1).
// Here, the index is cyclic; f[-1] = f[N-1], f[-2] = f[N-2], etc. using namespace std;
// Let F[0...N-1] be FFT(f), and similarly, define G and H.
// The convolution theorem says H[n] = F[n]G[n] (element-wise product). typedef long double DOUBLE;
// To compute h[] in O(N log N) time, do the following: typedef vector<DOUBLE> VD;
// 1. Compute F and G (pass dir = 1 as the argument). typedef vector<VD> VVD;
// 2. Get H by element-wise multiplying F and G. typedef vector<int> VI;
// 3. Get h by taking the inverse FFT (use dir = -1 as the argument)
// and *dividing by N*. DO NOT FORGET THIS SCALING FACTOR. const DOUBLE EPS = 1e-9;

int main(void) struct LPSolver {


{ int m, n;
printf("If rows come in identical pairs, then everything works.\n"); VI B, N;
VVD D;
cpx a[8] = {0, 1, cpx(1,3), cpx(0,5), 1, 0, 2, 0};
cpx b[8] = {1, cpx(0,-2), cpx(0,1), 3, -1, -3, 1, -2}; LPSolver(const VVD &A, const VD &b, const VD &c) :
cpx A[8]; m(b.size()), n(c.size()), N(n + 1), B(m), D(m + 2, VD(n + 2)) {
cpx B[8]; for (int i = 0; i < m; i++) for (int j = 0; j < n; j++) D[i][j] = A[i][j];
FFT(a, A, 1, 8, 1); for (int i = 0; i < m; i++) { B[i] = n + i; D[i][n] = -1; D[i][n + 1] = b[i]; }
FFT(b, B, 1, 8, 1); for (int j = 0; j < n; j++) { N[j] = j; D[m][j] = -c[j]; }
N[n] = -1; D[m + 1][n] = 1;
for(int i = 0 ; i < 8 ; i++) }
{
printf("%7.2lf%7.2lf", A[i].a, A[i].b); void Pivot(int r, int s) {
} double inv = 1.0 / D[r][s];
printf("\n"); for (int i = 0; i < m + 2; i++) if (i != r)
for(int i = 0 ; i < 8 ; i++) for (int j = 0; j < n + 2; j++) if (j != s)
{ D[i][j] -= D[r][j] * D[i][s] * inv;
cpx Ai(0,0); for (int j = 0; j < n + 2; j++) if (j != s) D[r][j] *= inv;
for(int j = 0 ; j < 8 ; j++) for (int i = 0; i < m + 2; i++) if (i != r) D[i][s] *= -inv;
{ D[r][s] = inv;

Ai = Ai + a[j] * EXP(j * i * two_pi / 8); swap(B[r], N[s]);
} }
bool Simplex(int phase) { int main() {
int x = phase == 1 ? m + 1 : m;
while (true) { int N, s, t;
int s = -1; scanf("%d%d%d", &N, &s, &t);
for (int j = 0; j <= n; j++) { vector<vector<PII> > edges(N);
if (phase == 2 && N[j] == -1) continue; for (int i = 0; i < N; i++) {
if (s == -1 || D[x][j] < D[x][s] || D[x][j] == D[x][s] && N[j] < N[s]) s = j; int M;
} scanf("%d", &M);
if (D[x][s] > -EPS) return true; for (int j = 0; j < M; j++) {
int r = -1; int vertex, dist;
for (int i = 0; i < m; i++) { scanf("%d%d", &vertex, &dist);
if (D[i][s] < EPS) continue; edges[i].push_back(make_pair(dist, vertex)); // note order of arguments here
if (r == -1 || D[i][n + 1] / D[i][s] < D[r][n + 1] / D[r][s] || }
(D[i][n + 1] / D[i][s]) == (D[r][n + 1] / D[r][s]) && B[i] < B[r]) r = i; }
}
if (r == -1) return false; // use priority queue in which top element has the "smallest" priority
Pivot(r, s); priority_queue<PII, vector<PII>, greater<PII> > Q;
} vector<int> dist(N, INF), dad(N, -1);
} Q.push(make_pair(0, s));
dist[s] = 0;
DOUBLE Solve(VD &x) { while (!Q.empty()) {
int r = 0; PII p = Q.top();
for (int i = 1; i < m; i++) if (D[i][n + 1] < D[r][n + 1]) r = i; Q.pop();
if (D[r][n + 1] < -EPS) { int here = p.second;
Pivot(r, n); if (here == t) break;
if (!Simplex(1) || D[m + 1][n + 1] < -EPS) return -numeric_limits<DOUBLE>::infinity(); if (dist[here] != p.first) continue;
for (int i = 0; i < m; i++) if (B[i] == -1) {
int s = -1; for (vector<PII>::iterator it = edges[here].begin(); it != edges[here].end(); it++) {
for (int j = 0; j <= n; j++) if (dist[here] + it->first < dist[it->second]) {
if (s == -1 || D[i][j] < D[i][s] || D[i][j] == D[i][s] && N[j] < N[s]) s = j; dist[it->second] = dist[here] + it->first;
Pivot(i, s); dad[it->second] = here;
} Q.push(make_pair(dist[it->second], it->second));
} }
if (!Simplex(2)) return numeric_limits<DOUBLE>::infinity(); }
x = VD(n); }
for (int i = 0; i < m; i++) if (B[i] < n) x[B[i]] = D[i][n + 1];
return D[m][n + 1]; printf("%d\n", dist[t]);
} if (dist[t] < INF)
}; for (int i = t; i != -1; i = dad[i])
printf("%d%c", i, (i == s ? ’\n’ : ’ ’));
int main() { return 0;
}
const int m = 4;
const int n = 3; /*
DOUBLE _A[m][n] = { Sample input:
{ 6, -1, 0 }, 5 0 4
{ -1, -5, 0 }, 2 1 2 3 1
{ 1, 5, 1 }, 2 2 4 4 5
{ -1, -5, -1 } 3 1 4 3 3 4 1
}; 2 0 1 2 3
DOUBLE _b[m] = { 10, -4, 5, -5 }; 2 1 5 2 1
DOUBLE _c[n] = { 1, -1, 0 };
Expected:
VVD A(m); 5
VD b(_b, _b + m); 4 2 3 0
VD c(_c, _c + n); */
for (int i = 0; i < m; i++) A[i] = VD(_A[i], _A[i] + n);

LPSolver solver(A, b, c);


VD x;
DOUBLE value = solver.Solve(x);
4.2 Strongly connected components
cerr << "VALUE: " << value << endl; // VALUE: 1.29032
cerr << "SOLUTION:"; // SOLUTION: 1.74194 0.451613 1
for (size_t i = 0; i < x.size(); i++) cerr << " " << x[i]; #include<memory.h>
cerr << endl; struct edge{int e, nxt;};
return 0; int V, E;
} edge e[MAXE], er[MAXE];
int sp[MAXV], spr[MAXV];
int group_cnt, group_num[MAXV];
bool v[MAXV];
int stk[MAXV];
4 Graph algorithms void fill_forward(int x)
{
int i;
v[x]=true;
4.1 Fast Dijkstra's algorithm for(i=sp[x];i;i=e[i].nxt) if(!v[e[i].e]) fill_forward(e[i].e);
stk[++stk[0]]=x;
}
void fill_backward(int x)
// Implementation of Dijkstra’s algorithm using adjacency lists {
// and priority queue for efficiency. int i;
// v[x]=false;
// Running time: O(|E| log |V|) group_num[x]=group_cnt;
for(i=spr[x];i;i=er[i].nxt) if(v[er[i].e]) fill_backward(er[i].e);
#include <queue> }
#include <cstdio> void add_edge(int v1, int v2) //add edge v1->v2
{
using namespace std; e [++E].e=v2; e [E].nxt=sp [v1]; sp [v1]=E;

const int INF = 2000000000; er[ E].e=v1; er[E].nxt=spr[v2]; spr[v2]=E;
typedef pair<int, int> PII; }
void SCC() SuffixArray(const string &s) : L(s.length()), s(s), P(1, vector<int>(L, 0)), M(L) {
{ for (int i = 0; i < L; i++) P[0][i] = int(s[i]);
int i; for (int skip = 1, level = 1; skip < L; skip *= 2, level++) {
stk[0]=0; P.push_back(vector<int>(L, 0));
memset(v, false, sizeof(v)); for (int i = 0; i < L; i++)
for(i=1;i<=V;i++) if(!v[i]) fill_forward(i); M[i] = make_pair(make_pair(P[level-1][i], i + skip < L ? P[level-1][i + skip] : -1000), i);
group_cnt=0; sort(M.begin(), M.end());
for(i=stk[0];i>=1;i--) if(v[stk[i]]){group_cnt++; fill_backward(stk[i]);} for (int i = 0; i < L; i++)
} P[level][M[i].second] = (i > 0 && M[i].first == M[i-1].first) ? P[level][M[i-1].second] : i;
}
}
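A minimal usage sketch for the strongly connected components routines of section 4.2, not part of the original notebook: it assumes MAXV and MAXE have been defined to adequate bounds (the snippet requires them), that the global arrays start zeroed, and that vertices are 1-indexed as in fill_forward; the helper name is illustrative only.

// Edges 1->2, 2->3, 3->1 form one SCC; vertex 4 (reached via 3->4) is its own SCC.
// After SCC(), group_cnt == 2 and group_num[1..4] holds each vertex's component id.
void demo_scc() {
  V = 4; E = 0;        // global counters used by add_edge/SCC
  add_edge(1, 2);
  add_edge(2, 3);
  add_edge(3, 1);
  add_edge(3, 4);
  SCC();
}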

vector<int> GetSuffixArray() { return P.back(); }


4.3 Eulerian path // returns the length of the longest common prefix of s[i...L-1] and s[j...L-1]
int LongestCommonPrefix(int i, int j) {
int len = 0;
struct Edge; if (i == j) return L - i;
typedef list<Edge>::iterator iter; for (int k = P.size() - 1; k >= 0 && i < L && j < L; k--) {
if (P[k][i] == P[k][j]) {
struct Edge i += 1 << k;
{ j += 1 << k;
int next_vertex; len += 1 << k;
iter reverse_edge; }
}
Edge(int next_vertex) return len;
:next_vertex(next_vertex) }
{ } };
};
// BEGIN CUT
const int max_vertices = ; // The following code solves UVA problem 11512: GATTACA.
int num_vertices; #define TESTING
list<Edge> adj[max_vertices]; // adjacency list #ifdef TESTING
int main() {
vector<int> path; int T;
cin >> T;
void find_path(int v) for (int caseno = 0; caseno < T; caseno++) {
{ string s;
while(adj[v].size() > 0) cin >> s;
{ SuffixArray array(s);
int vn = adj[v].front().next_vertex; vector<int> v = array.GetSuffixArray();
adj[vn].erase(adj[v].front().reverse_edge); int bestlen = -1, bestpos = -1, bestcount = 0;
adj[v].pop_front(); for (int i = 0; i < s.length(); i++) {
find_path(vn); int len = 0, count = 0;
} for (int j = i+1; j < s.length(); j++) {
path.push_back(v); int l = array.LongestCommonPrefix(i, j);
} if (l >= len) {
if (l > len) count = 2; else count++;
void add_edge(int a, int b) len = l;
{ }
adj[a].push_front(Edge(b)); }
iter ita = adj[a].begin(); if (len > bestlen || len == bestlen && s.substr(bestpos, bestlen) > s.substr(i, len)) {
adj[b].push_front(Edge(a)); bestlen = len;
iter itb = adj[b].begin(); bestcount = count;
ita->reverse_edge = itb; bestpos = i;
itb->reverse_edge = ita; }
} }
if (bestlen == 0) {
cout << "No repetitions found!" << endl;
} else {
cout << s.substr(bestpos, bestlen) << " " << bestcount << endl;
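A hedged usage sketch for the Eulerian path routine above (not in the original notebook); it assumes max_vertices has been filled in and that <list>, <vector> and <algorithm> are included. The graph here is undirected, so start find_path from a vertex of odd degree if one exists.
void euler_example() {
  num_vertices = 3;
  add_edge(0, 1); add_edge(1, 2); add_edge(2, 0); // a triangle: an Eulerian circuit exists
  find_path(0);
  reverse(path.begin(), path.end());              // path is recorded in reverse visiting order
}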
5 Data structures }
}

5.1 Suffix array #else


// END CUT
int main() {

// Suffix array construction in O(L log^2 L) time. Routine for // bobocel is the 0'th suffix
// computing the length of the longest common prefix of any two // obocel is the 5’th suffix
// suffixes in O(log L) time. // bocel is the 1’st suffix
// // ocel is the 6’th suffix
// INPUT: string s // cel is the 2’nd suffix
// // el is the 3’rd suffix
// OUTPUT: array suffix[] such that suffix[i] = index (from 0 to L-1) // l is the 4’th suffix
// of substring s[i...L-1] in the list of sorted suffixes. SuffixArray suffix("bobocel");
// That is, if we take the inverse of the permutation suffix[], vector<int> v = suffix.GetSuffixArray();
// we get the actual suffix array.
// Expected output: 0 5 1 6 2 3 4
#include <vector> // 2
#include <iostream> for (int i = 0; i < v.size(); i++) cout << v[i] << " ";
#include <string> cout << endl;
cout << suffix.LongestCommonPrefix(0, 2) << endl;
using namespace std; }
// BEGIN CUT
struct SuffixArray { #endif
const int L; // END CUT
string s;
vector<vector<int> > P;

vector<pair<pair<int,int>,int> > M;
};
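A small helper, not part of the original notebook, that makes the inverse-permutation remark above concrete: GetSuffixArray() returns ranks, and inverting that permutation yields the suffix array proper (sa[r] = start index of the r-th smallest suffix).
vector<int> RanksToSuffixArray(const vector<int> &rank) {
  vector<int> sa(rank.size());
  for (int i = 0; i < (int)rank.size(); i++) sa[rank[i]] = i;
  return sa;
}
// For "bobocel" the ranks are 0 5 1 6 2 3 4, so sa = {0, 2, 4, 5, 6, 1, 3}.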

5.2 Binary Indexed Tree bool operator==(const point &a, const point &b)
{
return a.x == b.x && a.y == b.y;
}
#include <iostream>
using namespace std; // sorts points on x-coordinate
bool on_x(const point &a, const point &b)
#define LOGSZ 17 {
return a.x < b.x;
int tree[(1<<LOGSZ)+1]; }
int N = (1<<LOGSZ);
// sorts points on y-coordinate
// add v to value at x bool on_y(const point &a, const point &b)
void set(int x, int v) { {
while(x <= N) { return a.y < b.y;
tree[x] += v; }
x += (x & -x);
} // squared distance between points
} ntype pdist2(const point &a, const point &b)
{
// get cumulative sum up to and including x ntype dx = a.x-b.x, dy = a.y-b.y;
int get(int x) { return dx*dx + dy*dy;
int res = 0; }
while(x) {
res += tree[x]; // bounding box for a set of points
x -= (x & -x); struct bbox
} {
return res; ntype x0, x1, y0, y1;
}
bbox() : x0(sentry), x1(-sentry), y0(sentry), y1(-sentry) {}
// get largest value with cumulative sum less than or equal to x;
// for smallest, pass x-1 and add 1 to result // computes bounding box from a bunch of points
int getind(int x) { void compute(const vector<point> &v) {
int idx = 0, mask = N; for (int i = 0; i < v.size(); ++i) {
while(mask && idx < N) { x0 = min(x0, v[i].x); x1 = max(x1, v[i].x);
int t = idx + mask; y0 = min(y0, v[i].y); y1 = max(y1, v[i].y);
if(x >= tree[t]) { }
idx = t; }
x -= tree[t];
} // squared distance between a point and this bbox, 0 if inside
mask >>= 1; ntype distance(const point &p) {
} if (p.x < x0) {
return idx; if (p.y < y0) return pdist2(point(x0, y0), p);
} else if (p.y > y1) return pdist2(point(x0, y1), p);
else return pdist2(point(x0, p.y), p);
}
else if (p.x > x1) {
if (p.y < y0) return pdist2(point(x1, y0), p);
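A hedged usage sketch for the binary indexed tree above, not part of the original notebook; indices are 1-based and tree[] starts out all zero.
void bit_example() {
  set(3, 5); set(7, 2);          // point updates
  int s = get(7);                // s == 7: sum of positions 1..7
  int r = get(7) - get(3);       // r == 2: sum of positions 4..7
  int k = getind(6);             // k == 6: largest index whose prefix sum is <= 6
  cout << s << " " << r << " " << k << endl;
}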
5.3 Union-find set else if (p.y > y1) return pdist2(point(x1, y1), p);
else return pdist2(point(x1, p.y), p);
}
//union-find set: the vector/array contains the parent of each node else {
int find(vector <int>& C, int x){return (C[x]==x) ? x : C[x]=find(C, C[x]);} //C++ if (p.y < y0) return pdist2(point(p.x, y0), p);
int find(int x){return (C[x]==x)?x:C[x]=find(C[x]);} //C else if (p.y > y1) return pdist2(point(p.x, y1), p);
else return 0;
}
}
};
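A hedged companion to the find() above, not in the original notebook: initialize C[i] = i and sz[i] = 1, then merge components with union by size.
bool merge(vector<int> &C, vector<int> &sz, int x, int y) {
  x = find(C, x); y = find(C, y);
  if (x == y) return false;            // already in the same set
  if (sz[x] < sz[y]) swap(x, y);       // attach the smaller tree to the larger one
  C[y] = x; sz[x] += sz[y];
  return true;
}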
5.4 KD-tree // stores a single node of the kd-tree, either internal or leaf
struct kdnode
// ----------------------------------------------------------------- {
// A straightforward, but probably sub-optimal KD-tree implementation bool leaf; // true if this is a leaf node (has one point)
// that's probably good enough for most things (currently it's a point pt; // the single point, if this is a leaf
// 2D-tree) bbox bound; // bounding box for set of points in children
//
// - constructs from n points in O(n lg^2 n) time kdnode *first, *second; // two children of this kd-node
// - handles nearest-neighbor query in O(lg n) if points are well
// distributed kdnode() : leaf(false), first(0), second(0) {}
// - worst case for nearest-neighbor may be linear in pathological ~kdnode() { if (first) delete first; if (second) delete second; }
// case
// // intersect a point with this node (returns squared distance)
// Sonny Chan, Stanford University, April 2009 ntype intersect(const point &p) {
// ----------------------------------------------------------------- return bound.distance(p);
}
#include <iostream>
#include <vector> // recursively builds a kd-tree from a given cloud of points
#include <limits> void construct(vector<point> &vp)
#include <cstdlib> {
// compute bounding box for points at this node
using namespace std; bound.compute(vp);

// number type for coordinates, and its maximum value // if we’re down to one point, then we’re a leaf node
typedef long long ntype; if (vp.size() == 1) {
const ntype sentry = numeric_limits<ntype>::max(); leaf = true;
pt = vp[0];
// point structure for 2D-tree, can be extended to 3D }
struct point { else {

ntype x, y; // split on x if the bbox is wider than high (not best heuristic...)
point(ntype xx = 0, ntype yy = 0) : x(xx), y(yy) {} if (bound.x1-bound.x0 >= bound.y1-bound.y0)
sort(vp.begin(), vp.end(), on_x);
// otherwise split on y-coordinate
else
sort(vp.begin(), vp.end(), on_y);
5.5 Splay tree
// divide by taking half the array for each child
// (not best performance if many duplicates in the middle) #include <cstdio>
int half = vp.size()/2; #include <algorithm>
vector<point> vl(vp.begin(), vp.begin()+half); using namespace std;
vector<point> vr(vp.begin()+half, vp.end());
first = new kdnode(); first->construct(vl); const int N_MAX = 130010;
second = new kdnode(); second->construct(vr); const int oo = 0x3f3f3f3f;
} struct Node
} {
}; Node *ch[2], *pre;
int val, size;
// simple kd-tree class to hold the tree and handle queries bool isTurned;
struct kdtree } nodePool[N_MAX], *null, *root;
{
kdnode *root; Node *allocNode(int val)
{
// constructs a kd-tree from a points (copied here, as it sorts them) static int freePos = 0;
kdtree(const vector<point> &vp) { Node *x = &nodePool[freePos ++];
vector<point> v(vp.begin(), vp.end()); x->val = val, x->isTurned = false;
root = new kdnode(); x->ch[0] = x->ch[1] = x->pre = null;
root->construct(v); x->size = 1;
} return x;
~kdtree() { delete root; } }

// recursive search method returns squared distance to nearest point inline void update(Node *x)
ntype search(kdnode *node, const point &p) {
{ x->size = x->ch[0]->size + x->ch[1]->size + 1;
if (node->leaf) { }
// commented special case tells a point not to find itself
// if (p == node->pt) return sentry; inline void makeTurned(Node *x)
// else {
return pdist2(p, node->pt); if(x == null)
} return;
swap(x->ch[0], x->ch[1]);
ntype bfirst = node->first->intersect(p); x->isTurned ^= 1;
ntype bsecond = node->second->intersect(p); }

// choose the side with the closest bounding box to search first inline void pushDown(Node *x)
// (note that the other side is also searched if needed) {
if (bfirst < bsecond) { if(x->isTurned)
ntype best = search(node->first, p); {
if (bsecond < best) makeTurned(x->ch[0]);
best = min(best, search(node->second, p)); makeTurned(x->ch[1]);
return best; x->isTurned ^= 1;
} }
else { }
ntype best = search(node->second, p);
if (bfirst < best) inline void rotate(Node *x, int c)
best = min(best, search(node->first, p)); {
return best; Node *y = x->pre;
} x->pre = y->pre;
} if(y->pre != null)
y->pre->ch[y == y->pre->ch[1]] = x;
// squared distance to the nearest y->ch[!c] = x->ch[c];
ntype nearest(const point &p) { if(x->ch[c] != null)
return search(root, p); x->ch[c]->pre = y;
} x->ch[c] = y, y->pre = x;
}; update(y);
if(y == root)
// -------------------------------------------------------------------------- root = x;
// some basic test code here }

int main() void splay(Node *x, Node *p)


{ {
// generate some random points for a kd-tree while(x->pre != p)
vector<point> vp; {
for (int i = 0; i < 100000; ++i) { if(x->pre->pre == p)
vp.push_back(point(rand()%100000, rand()%100000)); rotate(x, x == x->pre->ch[0]);
} else
kdtree tree(vp); {
Node *y = x->pre, *z = y->pre;
// query some points if(y == z->ch[0])
for (int i = 0; i < 10; ++i) { {
point q(rand()%100000, rand()%100000); if(x == y->ch[0])
cout << "Closest squared distance to (" << q.x << ", " << q.y << ")" rotate(y, 1), rotate(x, 1);
<< " is " << tree.nearest(q) << endl; else
} rotate(x, 0), rotate(x, 1);
}
return 0; else
} {
if(x == y->ch[1])
// -------------------------------------------------------------------------- rotate(y, 0), rotate(x, 0);
else
rotate(x, 1), rotate(x, 0);
}

}
}
update(x); update(1,0,origSize-1,begin,end,val);
} }
public void update(int curr, int tBegin, int tEnd, int begin, int end, int val) {
void select(int k, Node *fa) if(tBegin >= begin && tEnd <= end)
{ update[curr] += val;
Node *now = root; else {
while(1) leaf[curr] += (Math.min(end,tEnd)-Math.max(begin,tBegin)+1) * val;
{ int mid = (tBegin+tEnd)/2;
pushDown(now); if(mid >= begin && tBegin <= end)
int tmp = now->ch[0]->size + 1; update(2*curr, tBegin, mid, begin, end, val);
if(tmp == k) if(tEnd >= begin && mid+1 <= end)
break; update(2*curr+1, mid+1, tEnd, begin, end, val);
else if(tmp < k) }
now = now->ch[1], k -= tmp; }
else public long query(int begin, int end) {
now = now->ch[0]; return query(1,0,origSize-1,begin,end);
} }
splay(now, fa); public long query(int curr, int tBegin, int tEnd, int begin, int end) {
} if(tBegin >= begin && tEnd <= end) {
if(update[curr] != 0) {
Node *makeTree(Node *p, int l, int r) leaf[curr] += (tEnd-tBegin+1) * update[curr];
{ if(2*curr < update.length){
if(l > r) update[2*curr] += update[curr];
return null; update[2*curr+1] += update[curr];
int mid = (l + r) / 2; }
Node *x = allocNode(mid); update[curr] = 0;
x->pre = p; }
x->ch[0] = makeTree(x, l, mid - 1); return leaf[curr];
x->ch[1] = makeTree(x, mid + 1, r); }
update(x); else {
return x; leaf[curr] += (tEnd-tBegin+1) * update[curr];
} if(2*curr < update.length){
update[2*curr] += update[curr];
int main() update[2*curr+1] += update[curr];
{ }
int n, m; update[curr] = 0;
null = allocNode(0); int mid = (tBegin+tEnd)/2;
null->size = 0; long ret = 0;
root = allocNode(0); if(mid >= begin && tBegin <= end)
root->ch[1] = allocNode(oo); ret += query(2*curr, tBegin, mid, begin, end);
root->ch[1]->pre = root; if(tEnd >= begin && mid+1 <= end)
update(root); ret += query(2*curr+1, mid+1, tEnd, begin, end);
return ret;
scanf("%d%d", &n, &m); }
root->ch[1]->ch[0] = makeTree(root->ch[1], 1, n); }
splay(root->ch[1]->ch[0], null); }

while(m --)
{
int a, b;
scanf("%d%d", &a, &b);
a ++, b ++;
5.7 Lowest common ancestor
select(a - 1, null);
select(b + 1, root); const int max_nodes, log_max_nodes;
makeTurned(root->ch[1]->ch[0]); int num_nodes, log_num_nodes, root;
}
vector<int> children[max_nodes]; // children[i] contains the children of node i
for(int i = 1; i <= n; i ++) int A[max_nodes][log_max_nodes+1]; // A[i][j] is the 2^j-th ancestor of node i, or -1 if that
{ ancestor does not exist
select(i + 1, null); int L[max_nodes]; // L[i] is the distance between node i and the root
printf("%d ", root->val);
} // floor of the binary logarithm of n
} int lb(unsigned int n)
{
if(n==0)
return -1;
int p = 0;
5.6 Lazy segment tree if (n >= 1<<16) { n >>= 16; p += 16;
if (n >= 1<< 8) { n >>= 8; p += 8;
}
}
if (n >= 1<< 4) { n >>= 4; p += 4; }
if (n >= 1<< 2) { n >>= 2; p += 2; }
public class SegmentTreeRangeUpdate { if (n >= 1<< 1) { p += 1; }
public long[] leaf; return p;
public long[] update; }
public int origSize;
public SegmentTreeRangeUpdate(int[] list) { void DFS(int i, int l)
origSize = list.length; {
leaf = new long[4*list.length]; L[i] = l;
update = new long[4*list.length]; for(int j = 0; j < children[i].size(); j++)
build(1,0,list.length-1,list); DFS(children[i][j], l+1);
} }
public void build(int curr, int begin, int end, int[] list) {
if(begin == end) int LCA(int p, int q)
leaf[curr] = list[begin]; {
else { // ensure node p is at least as deep as node q
int mid = (begin+end)/2; if(L[p] < L[q])
build(2 * curr, begin, mid, list); swap(p, q);
build(2 * curr + 1, mid+1, end, list);
leaf[curr] = leaf[2*curr] + leaf[2*curr+1]; // "binary search" for the ancestor of node p situated on the same level as q
} for(int i = log_num_nodes; i >= 0; i--)

} if(L[p] - (1<<i) >= L[q])
public void update(int begin, int end, int val) { p = A[p][i];
A[i][0] = p;
if(p == q) if(p != -1)
return p; children[p].push_back(i);
else
// "binary search" for the LCA root = i;
for(int i = log_num_nodes; i >= 0; i--) }
if(A[p][i] != -1 && A[p][i] != A[q][i])
{ // precompute A using dynamic programming
p = A[p][i]; for(int j = 1; j <= log_num_nodes; j++)
q = A[q][i]; for(int i = 0; i < num_nodes; i++)
} if(A[i][j-1] != -1)
A[i][j] = A[A[i][j-1]][j-1];
return A[p][0]; else
} A[i][j] = -1;

int main(int argc,char* argv[]) // precompute L


{ DFS(root, 0);
// read num_nodes, the total number of nodes
log_num_nodes=lb(num_nodes);
return 0;
for(int i = 0; i < num_nodes; i++) }
{
int p;
// read p, the parent of node i or -1 if node i is the root
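A hedged usage sketch for the LCA code above, not part of the original notebook; it assumes max_nodes and log_max_nodes have been given concrete values (say 4 and 2) and that <cstring> is available for memset.
void lca_example() {
  num_nodes = 4; root = 0;
  memset(A, -1, sizeof(A));
  children[0].push_back(1); A[1][0] = 0;
  children[0].push_back(2); A[2][0] = 0;
  children[1].push_back(3); A[3][0] = 1;
  log_num_nodes = lb(num_nodes);
  for (int j = 1; j <= log_num_nodes; j++)          // same DP as in main()
    for (int i = 0; i < num_nodes; i++)
      A[i][j] = (A[i][j-1] == -1) ? -1 : A[A[i][j-1]][j-1];
  DFS(root, 0);
  // LCA(3, 2) == 0 and LCA(3, 1) == 1
}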

