Commit a39eed3e authored by Amelie Royer

adding std::cout flush

parent cd0d2c6f
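The diff below appends std::flush to each timestamped progress message so the text is pushed to the terminal (or to a redirected log file) immediately, instead of waiting in the stream buffer while a long solver or evaluation run is in progress. A minimal standalone sketch of the idiom, not taken from this repository:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
    // Without the explicit flush, this line may not appear until much later
    // when stdout is fully buffered (e.g. output redirected to a file).
    std::cout << "starting long computation...\n" << std::flush;
    std::this_thread::sleep_for(std::chrono::seconds(2));  // stand-in for the solver
    std::cout << "done\n";  // remaining output is flushed at normal program exit
    return 0;
}

std::endl would force the same flush, but the messages in this code already embed the newline inside the string literal, so appending std::flush keeps the output bytes unchanged while guaranteeing the write happens right away.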
@@ -21,19 +21,18 @@
template <typename M>
void mainMDP(M model, std::string datafile_base, int steps, float epsilon, bool precision,bool verbose) {
assert(("Model does not enable MDP mode", model.mdp_enabled()));
// Solve Model
auto start = std::chrono::high_resolution_clock::now();
std::cout << "\n" << current_time_str() << " - Starting MDP ValueIteration solver\n";
std::cout << "\n" << current_time_str() << " - Starting MDP ValueIteration solver\n" << std::flush;
AIToolbox::MDP::ValueIteration<decltype(model)> solver(steps, epsilon);
auto solution = solver(model);
-std::cout << current_time_str() << " - Convergence criterion e = " << epsilon << " reached ? " << std::boolalpha << std::get<0>(solution) << "\n";
+std::cout << current_time_str() << " - Convergence criterion e = " << epsilon << " reached ? " << std::boolalpha << std::get<0>(solution) << "\n" << std::flush;
auto elapsed = std::chrono::high_resolution_clock::now() - start;
double training_time = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() / 1000000.;
// Build and Evaluate Policy
start = std::chrono::high_resolution_clock::now();
std::cout << "\n" << current_time_str() << " - Starting evaluation!\n";
std::cout << "\n" << current_time_str() << " - Starting evaluation!\n" << std::flush;
AIToolbox::MDP::Policy policy(model.getO(), model.getA(), std::get<1>(solution));
std::cout << std::flush;
std::cerr << std::flush;
@@ -42,7 +41,7 @@ void mainMDP(M model, std::string datafile_base, int steps, float epsilon, bool
double testing_time = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() / 1000000.;
// Output Times
-std::cout << current_time_str() << " - Timings\n";
+std::cout << current_time_str() << " - Timings\n" << std::flush;
std::cout << " > Training : " << training_time << "s\n";
std::cout << " > Testing : " << testing_time << "s\n";
}
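Both drivers time each phase with the same chrono pattern visible in the hunk above: take a high_resolution_clock timestamp before the phase, subtract it from a second timestamp afterwards, cast the difference to integral microseconds, and divide by 1e6 to report seconds as a double. A small self-contained sketch of that pattern (illustrative only, not code from this repository):

#include <chrono>
#include <iostream>
#include <thread>

int main() {
    auto start = std::chrono::high_resolution_clock::now();
    std::this_thread::sleep_for(std::chrono::milliseconds(250));  // placeholder for training/testing work
    auto elapsed = std::chrono::high_resolution_clock::now() - start;
    // Same conversion as in the diff: microsecond count divided by 1,000,000 gives seconds.
    double seconds = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count() / 1000000.;
    std::cout << "   > Elapsed  : " << seconds << "s\n";
    return 0;
}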
@@ -70,11 +69,13 @@ int main(int argc, char* argv[]) {
std::cout << "\n" << current_time_str() << " - Loading appropriate model\n";
if (!data.compare("reco")) {
Recomodel model (datafile_base + ".summary", discount, true);
assert(("Model does not enable MDP mode", model.mdp_enabled()));
model.load_rewards(datafile_base + ".rewards");
model.load_transitions(datafile_base + ".transitions", precision, precision, datafile_base + ".profiles");
mainMDP(model, datafile_base, steps, epsilon, precision, verbose);
} else if (!data.compare("maze")) {
Mazemodel model(datafile_base + ".summary", discount);
assert(("Model does not enable MDP mode", model.mdp_enabled()));
model.load_rewards(datafile_base + ".rewards");
model.load_transitions(datafile_base + ".transitions", precision, precision);
mainMDP(model, datafile_base, steps, epsilon, precision, verbose);
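The asserts at the two call sites use the comma-operator trick: the parenthesized comma expression evaluates to its right-hand operand, so the check tests model.mdp_enabled() while the string literal ends up in the stringized expression that assert prints when the check fails. A hypothetical standalone illustration, with a stub in place of the real model:

#include <cassert>

bool mdp_enabled_stub() { return false; }  // stand-in for model.mdp_enabled(); not part of this codebase

int main() {
    // Aborts and prints the whole asserted expression, including the message text,
    // because the stub returns false (and NDEBUG is not defined).
    assert(("Model does not enable MDP mode", mdp_enabled_stub()));
    return 0;
}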
......
@@ -23,7 +23,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
// Training
double training_time, testing_time;
auto start = std::chrono::high_resolution_clock::now();
std::cout << "\n" << current_time_str() << " - Starting " << algo << " solver...!\n";
std::cout << "\n" << current_time_str() << " - Starting " << algo << " solver...!\n" <<std::flush;
// Evaluation
// POMCP
@@ -31,7 +31,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
AIToolbox::POMDP::POMCP<decltype(model)> solver( model, beliefSize, steps, exp);
training_time = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - start).count() / 1000000.;
start = std::chrono::high_resolution_clock::now();
-std::cout << current_time_str() << " - Starting evaluation!\n";
+std::cout << current_time_str() << " - Starting evaluation!\n" << std::flush;
std::cout << std::flush;
std::cerr << std::flush;
if (has_test) {
@@ -46,7 +46,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
AIToolbox::POMDP::MEMCP<decltype(model)> solver( model, beliefSize, steps, exp);
training_time = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - start).count() / 1000000.;
start = std::chrono::high_resolution_clock::now();
-std::cout << current_time_str() << " - Starting evaluation!\n";
+std::cout << current_time_str() << " - Starting evaluation!\n" << std::flush;
std::cout << std::flush;
std::cerr << std::flush;
if (has_test) {
@@ -62,7 +62,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
if (!verbose) {std::cerr.setstate(std::ios_base::failbit);}
auto solution = solver(model);
if (!verbose) {std::cerr.clear();}
std::cout << "\n" << current_time_str() << " - Convergence criterion reached: " << std::boolalpha << std::get<0>(solution) << "\n";
std::cout << "\n" << current_time_str() << " - Convergence criterion reached: " << std::boolalpha << std::get<0>(solution) << "\n" << std::flush;
int horizon_reached = std::get<2>(solution);
std::cout << "Horizon " << horizon_reached << " reached\n";
std::chrono::high_resolution_clock::now() - start;
@@ -70,7 +70,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
// Build and Evaluate Policy
start = std::chrono::high_resolution_clock::now();
std::cout << "\n" << current_time_str() << " - Starting evaluation!\n";
std::cout << "\n" << current_time_str() << " - Starting evaluation!\n" << std::flush;
AIToolbox::POMDP::Policy policy(model.getS(), model.getA(), model.getO(), std::get<1>(solution));
std::cout << std::flush;
std::cerr << std::flush;
@@ -83,7 +83,7 @@ void mainMEMDP(M model, std::string datafile_base, std::string algo, int horizon
}
// Output Times
-std::cout << current_time_str() << " - Timings\n";
+std::cout << current_time_str() << " - Timings\n" << std::flush;
std::cout << " > Training : " << training_time << "s\n";
std::cout << " > Testing : " << testing_time << "s\n";
}
......