benchmarks/aggregate: read JSONL files directly (#6191)
This gets rid of the intermediate step of going through results_analyzer.
The test CSV files were converted to JSONL with this Perl script:
```
# One-off converter: read a benchmark results CSV (first row is the header)
# named in @ARGV and emit one JSON object per data row on stdout (JSONL).
use warnings;
use strict;
use Text::CSV;

die "usage: $0 <results.csv>\n" if @ARGV == 0;
my $filename = $ARGV[0];

my @headers = ();    # column names in file order
my %h       = ();    # column name -> column index
my $csv = Text::CSV->new({ sep_char => ',' });

open(my $fh, '<', $filename) or die "open $filename: $!";
while (my $line = <$fh>) {
    chomp $line;
    if (@headers == 0) {
        # Parse the header with the same CSV parser as the data rows so
        # quoted header names containing commas are handled consistently.
        $csv->parse($line) or die "Cannot parse header line: $line\n";
        @headers = $csv->fields();
        for my $i (0 .. $#headers) {
            $h{$headers[$i]} = $i;
        }
        next;
    }
    if (!$csv->parse($line)) {
        print STDERR "Cannot parse line: $line\n";
        next;
    }
    my @f = $csv->fields();
    my $model_name        = $f[$h{model_name}];
    my $accelerator_model = $f[$h{accelerator_model}];
    # Empty fields become JSON null.  Use length() rather than a truthiness
    # test so a legitimate value of "0" is not mistaken for an empty field.
    my $xla        = length($f[$h{xla}])        ? '"' . $f[$h{xla}] . '"' : "null";
    my $dynamo     = $f[$h{dynamo}];
    my $test       = $f[$h{test}];
    my $batch_size = length($f[$h{batch_size}]) ? $f[$h{batch_size}]      : "null";
    my $median     = $f[$h{median_total_time}];
    # total_time holds two samples to match "repeat": 2; the first sample is
    # a fixed filler value, only the median from the CSV is meaningful.
    my $total_time = length($median) ? "[15.150130984999123, $median]" : "null";
    my $timestamp  = $f[$h{timestamp}];
    # NOTE(review): field values are interpolated into the JSON template
    # without escaping — assumes no quotes/backslashes in the CSV values and
    # a numeric timestamp; confirm against the test CSV files being converted.
    print "{\"model\": {\"suite_name\": \"torchbench\", \"model_name\": \"$model_name\"}, \"experiment\": {\"experiment_name\": \"run_all\", \"accelerator\": \"cuda\", \"accelerator_model\": \"$accelerator_model\", \"xla\": $xla, \"xla_flags\": null, \"dynamo\": \"$dynamo\", \"test\": \"$test\", \"batch_size\": $batch_size}, \"repeat\": 2, \"iterations_per_run\": 1, \"metrics\": {\"total_time\": $total_time, \"per_iter_time\": $total_time}, \"outputs_file\": null, \"timestamp\": $timestamp}\n";
}
close($fh);
```