Commit 1c5ca7ab authored by Ranadeep Biswas
Browse files

update

parent db434c50
# Docker Compose topology: three AntidoteDB nodes on one host.
# SHORT_NAME/NODE_NAME give each Erlang VM a stable node name
# (antidote@<hostname>) so the nodes can be joined via rpc later.
version: "3.7"
services:
  antidote1:
    image: antidotedb/antidote
    container_name: antidote1
    hostname: antidote1
    environment:
      SHORT_NAME: "true"
      NODE_NAME: antidote@antidote1
  antidote2:
    image: antidotedb/antidote
    container_name: antidote2
    hostname: antidote2
    environment:
      SHORT_NAME: "true"
      NODE_NAME: antidote@antidote2
  antidote3:
    image: antidotedb/antidote
    container_name: antidote3
    hostname: antidote3
    environment:
      SHORT_NAME: "true"
      NODE_NAME: antidote@antidote3
%% Join the three Antidote nodes into one inter-DC cluster.
%% Run from an Erlang shell that can reach antidote@antidote{1,2,3}.
%% 1. Start the inter-DC background processes on every node.
rpc:call(antidote@antidote1, inter_dc_manager, start_bg_processes, [stable]),
rpc:call(antidote@antidote2, inter_dc_manager, start_bg_processes, [stable]),
rpc:call(antidote@antidote3, inter_dc_manager, start_bg_processes, [stable]),
%% 2. Collect each data center's descriptor.
{ok, Desc1} = rpc:call(antidote@antidote1, inter_dc_manager, get_descriptor, []),
{ok, Desc2} = rpc:call(antidote@antidote2, inter_dc_manager, get_descriptor, []),
{ok, Desc3} = rpc:call(antidote@antidote3, inter_dc_manager, get_descriptor, []),
Descriptors = [Desc1, Desc2, Desc3],
%% 3. Make every node observe all descriptors (including its own),
%%    synchronously, so the DCs replicate to each other.
rpc:call(antidote@antidote1, inter_dc_manager, observe_dcs_sync, [Descriptors]),
rpc:call(antidote@antidote2, inter_dc_manager, observe_dcs_sync, [Descriptors]),
rpc:call(antidote@antidote3, inter_dc_manager, observe_dcs_sync, [Descriptors]).
# Docker Compose topology: three CockroachDB v2.1.0 nodes on one host.
# roach1 bootstraps the cluster; roach2/roach3 join it via --join=roach1.
# NOTE(review): service keys are still named antidoteN (copied from the
# Antidote compose file) while container/host names are roachN — confirm
# whether the service names should be renamed too.
version: "3.7"
services:
  antidote1:
    image: cockroachdb/cockroach:v2.1.0
    container_name: roach1
    hostname: roach1
    command:
      - start
      - --insecure
  antidote2:
    image: cockroachdb/cockroach:v2.1.0
    container_name: roach2
    hostname: roach2
    command:
      - start
      - --insecure
      - --join=roach1
  antidote3:
    image: cockroachdb/cockroach:v2.1.0
    container_name: roach3
    hostname: roach3
    command:
      - start
      - --insecure
      - --join=roach1
extern crate antidotedb;
extern crate byteorder;
extern crate clap;
extern crate dbcop;
use dbcop::db::cluster::{Cluster, ClusterNode, Node, TestParams};
use dbcop::db::history::Transaction;
use clap::{App, Arg};
use byteorder::{BigEndian, ReadBytesExt};
use std::io::Cursor;
use antidotedb::crdt::{Operation, LWWREG};
use antidotedb::AntidoteDB;
/// One Antidote server endpoint plus the session state dbcop keeps for it.
#[derive(Debug, Clone)]
pub struct AntidoteNode {
// Generic node description (ip, id, ...) provided by dbcop.
node: Node,
// Antidote protocol endpoint, formatted as "<ip>:8087".
addr: String,
// Index of this node within the cluster.
id: usize,
// Commit timestamp of the last transaction seen on this node; passed to
// start_transaction so later transactions observe earlier commits.
// None until something commits.
timestamp: Option<Vec<u8>>,
}
impl From<Node> for AntidoteNode {
    /// Wraps a generic dbcop `Node`, deriving the Antidote endpoint
    /// ("<ip>:8087") from the node's IP. No timestamp is known yet.
    fn from(node: Node) -> Self {
        // Borrow what we need first, then move `node` in — avoids the
        // redundant clone the previous version made.
        let addr = format!("{}:8087", node.ip);
        let id = node.id;
        AntidoteNode {
            node,
            addr,
            id,
            timestamp: None,
        }
    }
}
impl ClusterNode for AntidoteNode {
// Replays one session of transactions against this node, writing each
// event's observed value/success and each transaction's success back
// into `hist`.
fn exec_session(&self, hist: &mut Vec<Transaction>) {
let mut conn = AntidoteDB::connect_with_string(&self.addr);
// Start from the node's last known commit timestamp so this session
// observes previously committed state.
let mut timestamp = self.timestamp.clone();
// println!("{:?}", timestamp);
hist.iter_mut().for_each(|transaction| {
let db_transaction = conn.start_transaction(timestamp.as_ref());
transaction.events.iter_mut().for_each(|event| {
// Each variable is a last-writer-wins register in the "dbcop" bucket.
let obj = LWWREG::new(&format!("{}", event.variable), "dbcop");
if event.write {
let op = obj.set(event.value as u64);
match conn.mult_update_in_transaction(&[op], &db_transaction) {
Ok(_) => event.success = true,
Err(_e) => {
// A failed write must not already be marked successful.
assert_eq!(event.success, false);
// println!("WRITE ERR -- {:?}", _e);
}
}
} else {
match conn.mult_read_in_transaction(&[obj.clone()], &db_transaction) {
Ok(values) => {
// Register payload is a big-endian u64.
let bytes = values[0].get_reg().get_value();
event.value =
Cursor::new(bytes).read_u64::<BigEndian>().unwrap() as usize;
event.success = true;
}
Err(_) => assert!(!event.success),
}
}
});
match conn.commit_transaction(&db_transaction) {
Ok(commit_time) => {
transaction.success = true;
// Thread the commit timestamp into the next transaction.
timestamp = Some(commit_time);
}
Err(_e) => {
assert_eq!(transaction.success, false);
println!("{:?} -- COMMIT ERROR", transaction);
}
}
})
}
}
/// A fixed set of Antidote nodes treated as one cluster under test.
#[derive(Debug)]
pub struct AntidoteCluster(Vec<AntidoteNode>);
impl AntidoteCluster {
fn new(ips: &Vec<&str>) -> Self {
let mut v = AntidoteCluster::node_vec(ips);
let k: Vec<_> = v.drain(..).map(|x| From::from(x)).collect();
AntidoteCluster(k)
}
fn create_table(&self) -> bool {
true
}
fn create_variables(&mut self, n_variable: usize) {
let mut conn = AntidoteDB::connect_with_string(&self.get_antidote_addr(0).unwrap());
let db_transaction = conn.start_transaction(None);
let ops: Vec<_> = (0..n_variable)
.map(|variable| LWWREG::new(&format!("{}", variable), "dbcop").set(0))
.collect();
conn.mult_update_in_transaction(&ops, &db_transaction)
.expect("error to init zero values");
match conn.commit_transaction(&db_transaction) {
Ok(commit_time) => {
self.0.iter_mut().for_each(|x| {
x.timestamp = Some(commit_time.clone());
});
}
Err(_e) => {
println!("COMMIT ERROR while init");
}
}
}
fn drop_database(&self) {}
fn get_antidote_addr(&self, i: usize) -> Option<String> {
self.0.get(i).map(|ref node| node.addr.clone())
}
}
// Thin adapter wiring AntidoteCluster into dbcop's generic Cluster trait.
impl Cluster<AntidoteNode> for AntidoteCluster {
// Number of nodes in the cluster.
fn n_node(&self) -> usize {
self.0.len()
}
// One-time setup; Antidote needs no schema, so this always succeeds.
fn setup(&self) -> bool {
self.create_table()
}
// Generic description of node `id`. Panics if `id` is out of range.
fn get_node(&self, id: usize) -> Node {
self.0[id].node.clone()
}
// Antidote-specific handle for node `id`. Panics if `id` is out of range.
fn get_cluster_node(&self, id: usize) -> AntidoteNode {
self.0[id].clone()
}
// Per-test setup: initialize all variables to zero.
fn setup_test(&mut self, p: &TestParams) {
self.create_variables(p.n_variable);
}
// Per-test teardown (no-op for Antidote).
fn cleanup(&self) {
self.drop_database();
}
}
/// CLI entry point: parses test parameters and node IPs, then runs a dbcop
/// history test against the Antidote cluster.
fn main() {
    let matches = App::new("Antidote")
        .version("1.0")
        .author("Ranadeep")
        .about("verifies an Antidote cluster")
        .arg(
            Arg::with_name("n_variable")
                .long("nval")
                .short("v")
                .default_value("5"),
        )
        .arg(
            Arg::with_name("n_transaction")
                .long("ntxn")
                .short("t")
                .default_value("5"),
        )
        .arg(
            Arg::with_name("n_event")
                .long("nevt")
                .short("e")
                .default_value("2"),
        )
        // NOTE(review): declared without .takes_value(true), so this is a
        // bare flag, and it is never read below — confirm whether an output
        // path was intended here.
        .arg(Arg::with_name("history_output").long("output").short("o"))
        .arg(
            Arg::with_name("ips")
                .help("Cluster ips")
                .multiple(true)
                .required(true),
        )
        .get_matches();

    let ips: Vec<_> = matches.values_of("ips").unwrap().collect();

    let mut cluster = AntidoteCluster::new(&ips);
    // println!("{:?}", cluster);
    cluster.setup();

    // Remaining TestParams fields (e.g. the test id) keep their defaults.
    let params = TestParams {
        n_variable: matches.value_of("n_variable").unwrap().parse().unwrap(),
        n_transaction: matches.value_of("n_transaction").unwrap().parse().unwrap(),
        n_event: matches.value_of("n_event").unwrap().parse().unwrap(),
        ..Default::default()
    };
    println!("{:?}", params);
    cluster.test(&params);
}
extern crate clap;
extern crate dbcop;
extern crate postgres;
use dbcop::db::cluster::{Cluster, ClusterNode, Node, TestParams};
use dbcop::db::history::Transaction;
use clap::{App, Arg};
use postgres::{transaction, Connection, TlsMode};
/// One CockroachDB server endpoint as seen by dbcop.
#[derive(Debug)]
pub struct CockroachNode {
// PostgreSQL-wire connection URL, "postgresql://root@<ip>:26257".
addr: String,
// Index of this node within the cluster.
id: usize,
}
impl From<Node> for CockroachNode {
    /// Wraps a generic dbcop `Node`, deriving the node's PostgreSQL-wire
    /// connection URL from its IP (root user, default port 26257).
    fn from(node: Node) -> Self {
        let addr = format!("postgresql://{}@{}:26257", "root", node.ip);
        let id = node.id;
        CockroachNode { addr, id }
    }
}
impl ClusterNode for CockroachNode {
// Replays one session of transactions against this node, writing each
// event's observed value/success and each transaction's success back
// into `hist`.
fn exec_session(&self, hist: &mut Vec<Transaction>) {
match Connection::connect(self.addr.clone(), TlsMode::None) {
Ok(conn) => hist.iter_mut().for_each(|transaction| {
// Every transaction runs at SERIALIZABLE isolation.
let mut config = transaction::Config::new();
config.isolation_level(transaction::IsolationLevel::Serializable);
match conn.transaction_with(&config) {
Ok(sqltxn) => {
transaction.events.iter_mut().for_each(|event| {
if event.write {
match sqltxn.execute(
"UPDATE dbcop.variables SET val=$1 WHERE var=$2",
&[&(event.value as i64), &(event.variable as i64)],
) {
Ok(_) => event.success = true,
Err(_e) => {
// A failed write must not already be marked successful.
assert_eq!(event.success, false);
// println!("WRITE ERR -- {:?}", _e);
}
}
} else {
match sqltxn.query(
"SELECT * FROM dbcop.variables WHERE var=$1",
&[&(event.variable as i64)],
) {
Ok(result) => {
if !result.is_empty() {
let mut row = result.get(0);
let value : i64 = row.get("val");
event.value = value as usize;
event.success = true;
} else {
// may be diverged
assert_eq!(event.success, false);
}
}
Err(_e) => {
// println!("READ ERR -- {:?}", _e);
assert_eq!(event.success, false);
}
}
}
});
match sqltxn.commit() {
Ok(_) => {
transaction.success = true;
}
Err(_e) => {
assert_eq!(transaction.success, false);
println!("{:?} -- COMMIT ERROR {}", transaction, _e);
}
}
}
Err(e) => println!("{:?} - TRANSACTION ERROR", e),
}
}),
Err(_e) => {
// Could not connect at all: then no transaction in this session
// may have been marked successful.
hist.iter().for_each(|transaction| {
assert_eq!(transaction.success, false);
});
// println!("CONNECTION ERROR {}", _e);}
}
}
}
}
/// A fixed set of Cockroach nodes treated as one cluster under test.
#[derive(Debug)]
pub struct CockroachCluster(Vec<Node>);
impl CockroachCluster {
    /// Builds a cluster handle from the given node IPs.
    fn new(ips: &Vec<&str>) -> Self {
        CockroachCluster(CockroachCluster::node_vec(ips))
    }

    /// (Re)creates the `dbcop` database and an empty `variables` table on
    /// node 0; returns false if there is no node 0 or any statement fails.
    fn create_table(&self) -> bool {
        match self.get_postgresql_addr(0) {
            Some(ip) => Connection::connect(ip, TlsMode::None)
                .and_then(|pool| {
                    pool.execute("CREATE DATABASE IF NOT EXISTS dbcop", &[]).unwrap();
                    pool.execute("DROP TABLE IF EXISTS dbcop.variables", &[]).unwrap();
                    pool.execute(
                        "CREATE TABLE IF NOT EXISTS dbcop.variables (var INT NOT NULL PRIMARY KEY, val INT NOT NULL)", &[]
                    ).unwrap();
                    // conn.query("USE dbcop").unwrap();
                    Ok(true)
                }).is_ok(),
            _ => false,
        }
    }

    /// Inserts rows (var, 0) for var in 0..n_variable via node 0.
    /// Silently does nothing if the connection or prepare fails.
    fn create_variables(&self, n_variable: usize) {
        if let Some(ip) = self.get_postgresql_addr(0) {
            if let Ok(conn) = Connection::connect(ip, TlsMode::None) {
                for mut stmt in conn
                    .prepare("INSERT INTO dbcop.variables (var, val) values ($1, 0)")
                    .into_iter()
                {
                    (0..n_variable).for_each(|variable| {
                        stmt.execute(&[&(variable as i64)]).unwrap();
                    });
                }
            }
        }
    }

    /// Drops the whole `dbcop` database via node 0 (best effort on connect).
    fn drop_database(&self) {
        if let Some(ip) = self.get_postgresql_addr(0) {
            if let Ok(conn) = Connection::connect(ip, TlsMode::None) {
                conn.execute("DROP DATABASE dbcop CASCADE", &[]).unwrap();
            }
        }
    }

    /// Connection URL for node `i`, or None if out of range.
    fn get_postgresql_addr(&self, i: usize) -> Option<String> {
        // Option::map instead of the previous manual match.
        self.0
            .get(i)
            .map(|node| format!("postgresql://{}@{}:26257", "root", node.ip))
    }
}
// Thin adapter wiring CockroachCluster into dbcop's generic Cluster trait.
impl Cluster<CockroachNode> for CockroachCluster {
// Number of nodes in the cluster.
fn n_node(&self) -> usize {
self.0.len()
}
// One-time setup: create the dbcop database and variables table.
fn setup(&self) -> bool {
self.create_table()
}
// Generic description of node `id`. Panics if `id` is out of range.
fn get_node(&self, id: usize) -> Node {
self.0[id].clone()
}
// Cockroach-specific handle for node `id`, built on demand.
fn get_cluster_node(&self, id: usize) -> CockroachNode {
From::from(self.get_node(id))
}
// Per-test setup: insert zero-initialized rows for all variables.
fn setup_test(&mut self, p: &TestParams) {
self.create_variables(p.n_variable);
}
// Per-test teardown: drop the dbcop database.
fn cleanup(&self) {
self.drop_database();
}
}
/// CLI entry point: parses test parameters and node IPs, then runs a dbcop
/// history test against the Cockroach cluster.
fn main() {
    let cli = App::new("Cockroach")
        .version("1.0")
        .author("Ranadeep")
        .about("verifies a Cockroach cluster")
        .arg(Arg::with_name("n_variable").long("nval").short("v").default_value("5"))
        .arg(Arg::with_name("n_transaction").long("ntxn").short("t").default_value("5"))
        .arg(Arg::with_name("n_event").long("nevt").short("e").default_value("2"))
        .arg(Arg::with_name("ips").help("Cluster ips").multiple(true).required(true))
        .get_matches();

    // Each parameter has a default, so value_of is always Some; the parses
    // only fail on a non-numeric command-line override.
    let n_variable = cli.value_of("n_variable").unwrap().parse().unwrap();
    let n_transaction = cli.value_of("n_transaction").unwrap().parse().unwrap();
    let n_event = cli.value_of("n_event").unwrap().parse().unwrap();

    let ips: Vec<_> = cli.values_of("ips").unwrap().collect();
    let mut cluster = CockroachCluster::new(&ips);
    cluster.setup();

    let params = TestParams {
        id: 0,
        n_variable,
        n_transaction,
        n_event,
    };
    println!("{:?}", params);
    cluster.test(&params);
}
......@@ -27,7 +27,7 @@ impl ClusterNode for GaleraNode {
match mysql::Pool::new(self.addr.clone()) {
Ok(conn) => hist.iter_mut().for_each(|transaction| {
for mut sqltxn in conn.start_transaction(
false,
true,
Some(mysql::IsolationLevel::Serializable),
Some(false),
) {
......@@ -73,7 +73,7 @@ impl ClusterNode for GaleraNode {
}
Err(_e) => {
assert_eq!(transaction.success, false);
// println!("COMMIT ERROR {}", _e);
println!("{:?} -- COMMIT ERROR {}", transaction, _e);
}
}
}
......
"""Sweep (n_var, n_txn, n_evt) parameter combinations and run the dbcop
example binary repeatedly for each, capturing every run's output to a file.
"""
import argparse
import itertools
import os
import subprocess
import uuid
import time  # kept for the optional throttling sleep below

parser = argparse.ArgumentParser()
parser.add_argument("-v", "--max_var", type=int, default=5)
parser.add_argument("-t", "--max_txn", type=int, default=15)
parser.add_argument("-e", "--max_evt", type=int, default=15)

args = parser.parse_args()

step = 1

# All parameter combinations, smallest total workload first.
tups = itertools.product(range(5, args.max_var + 1, step),
                         range(14, args.max_txn + 1, step),
                         range(14, args.max_evt + 1, step))
tups = sorted(tups, key=lambda x: sum(x))

for n_var, n_txn, n_evt in tups:
    # One directory per combination, e.g. 005_014_014_<5-hex-uuid>.
    f_path = "{:03}_{:03}_{:03}_{}".format(
        n_var, n_txn, n_evt, uuid.uuid4().hex[:5])
    os.mkdir(f_path)
    for i in range(2000):
        with open(os.path.join(f_path, "{:06}.txt".format(i)), "w") as f:
            # Other targets (cockroachdb, galera) take the same CLI flags;
            # swap the binary path and container IPs to test them instead.
            process = subprocess.Popen("../target/release/examples/antidotedb -v{} -t{} -e{} 172.18.0.2 172.18.0.3 172.18.0.4".format(
                n_var, n_txn, n_evt).split(), stdout=f, stderr=f)
            process.wait()
            # time.sleep(.2)
# Abort on the first failing run.
set -e
# 100 verification rounds against the 3-node Cockroach cluster; each run's
# stdout+stderr goes to a nanosecond-timestamped file under result/
# (the result/ directory must already exist).
for i in {1..100}; do
cargo run --example cockroachdb --release -- -v 7 -t 10 -e 10 172.20.0.2 172.20.0.3 172.20.0.4 1> result/$(date +%s%N).txt 2>&1
done
"""Print the container IPs of a docker stack by inspecting every running
container and filtering on the stack-namespace label.
"""
import argparse
import subprocess
import json

parser = argparse.ArgumentParser(description='Get ips of a stack')
parser.add_argument('stack_name', metavar='name',
                    help='name of the docker stack')
parser.add_argument('-q', '--quiet', action="store_true",
                    help='Quiet')

args = parser.parse_args()

stack = args.stack_name

# Ids of all running containers (any stack); filtered below.
containers = subprocess.run(
    'docker ps -q'.split(), stdout=subprocess.PIPE)
containers = containers.stdout.decode().strip().split("\n")

stack_containers = []
for container in containers:
    if not container:
        # No running containers: split("\n") on "" yields one empty string.
        continue
    info = subprocess.run(
        'docker inspect {}'.format(container).split(),
        stdout=subprocess.PIPE)
    info = json.loads(info.stdout.decode().strip())
    # Containers outside any stack have no namespace label; .get() avoids
    # the KeyError the previous direct lookup raised for them.
    if info[0]['Config']['Labels'].get('com.docker.stack.namespace') == stack:
        ip = list(info[0]['NetworkSettings']
                  ['Networks'].values())[0]['IPAddress']
        stack_containers.append((info[0]['Id'], ip))

network_info = subprocess.run(
    'docker network inspect docker_gwbridge'.split(), stdout=subprocess.PIPE)
network_info = json.loads(network_info.stdout.decode().strip())