@@ -23,27 +23,27 @@
 
 // ARRAY TO TABLE
 
-$rows=[
-    ['2017-01-01','XXXXX',123,1],
-    ['2017-01-02','XXXXX',123,1],
-    ['2017-01-03','XXXXX',123,1],
-    ['2017-01-04','XXXXX',123,1],
-    ['2017-01-05','XXXXX',123,1],
-    ['2017-01-06','XXXXX',123,1],
-    ['2017-01-07','XXXXX',123,1]
+$rows = [
+    ['2017-01-01', 'XXXXX', 123, 1],
+    ['2017-01-02', 'XXXXX', 123, 1],
+    ['2017-01-03', 'XXXXX', 123, 1],
+    ['2017-01-04', 'XXXXX', 123, 1],
+    ['2017-01-05', 'XXXXX', 123, 1],
+    ['2017-01-06', 'XXXXX', 123, 1],
+    ['2017-01-07', 'XXXXX', 123, 1]
 ];
 
 
 
 // Write the array to a file
-$temp_file_name='/tmp/_test_data.TSV';
+$temp_file_name = '/tmp/_test_data.TSV';
 
 
 if (file_exists($temp_file_name)) unlink('/tmp/_test_data.TSV');
 foreach ($rows as $row)
 {
 
-    file_put_contents($temp_file_name,\ClickHouseDB\Quote\FormatLine::TSV($row)."\n",FILE_APPEND);
+    file_put_contents($temp_file_name, \ClickHouseDB\Quote\FormatLine::TSV($row) . "\n", FILE_APPEND);
 
 }
 
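
Note: once the TSV file is built this way, it can be loaded into ClickHouse in a single call. A minimal sketch, assuming the summing_url_views table from these examples and the library's insertBatchTSVFiles() helper; the column list is an assumption matching the four values per row above:

    // Column names are assumed for illustration; they must match the target table
    $db->insertBatchTSVFiles('summing_url_views', [$temp_file_name], [
        'event_date', 'url_hash', 'site_id', 'views'
    ]);
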
@@ -62,8 +62,8 @@
 
 
 
-echo "Count select rows:".$st->count()."\n";
-echo "Count all rows:".$st->countAll()."\n";
+echo "Count select rows:" . $st->count() . "\n";
+echo "Count all rows:" . $st->countAll() . "\n";
 echo "First row:\n";
 print_r($st->fetchOne());
 
@@ -75,16 +75,16 @@
 
 
 
-$st=$db->select('SELECT event_date,url_hash,sum(views),avg(views) FROM summing_url_views WHERE site_id<3333 GROUP BY event_date,url_hash WITH TOTALS');
+$st = $db->select('SELECT event_date,url_hash,sum(views),avg(views) FROM summing_url_views WHERE site_id<3333 GROUP BY event_date,url_hash WITH TOTALS');
 
 
 
 
-echo "Count select rows:".$st->count()."\n";
+echo "Count select rows:" . $st->count() . "\n";
 /*
 2
 */
-echo "Count all rows:".$st->countAll()."\n";
+echo "Count all rows:" . $st->countAll() . "\n";
 /*
 false
 */
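
Note: with WITH TOTALS, the aggregate row is not part of the regular result set; the statement object exposes it separately. A short sketch, assuming the totals() accessor from the library's Statement API (output shape illustrative):

    print_r($st->rows());   // the grouped rows; count() is 2 here
    print_r($st->totals()); // the extra aggregate row produced by WITH TOTALS
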
@@ -140,7 +140,7 @@
 )
 */
 $db->write("DROP TABLE IF EXISTS summing_url_views");
-echo "Tables EXISTS:".json_encode($db->showTables())."\n";
+echo "Tables EXISTS:" . json_encode($db->showTables()) . "\n";
 /*
 Tables EXISTS:[]
 */
\ No newline at end of file
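
Note: to rerun the example after this DROP, the table has to be recreated first. A sketch of the CREATE, with the schema assumed from the columns used elsewhere in these examples:

    $db->write("
        CREATE TABLE IF NOT EXISTS summing_url_views (
            event_date Date DEFAULT toDate(event_time),
            event_time DateTime,
            url_hash String,
            site_id Int32,
            views Int32,
            v_00 Int32,
            v_55 Int32
        ) ENGINE = SummingMergeTree(event_date, (site_id, url_hash, event_time, event_date), 8192)
    ");
    echo "Tables EXISTS:" . json_encode($db->showTables()) . "\n"; // now non-empty
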
@@ -51,15 +51,15 @@
 echo "use time:" . round(microtime(true) - $time_start, 2) . "\n";
 print_r($db->select('select sum(views) from summing_url_views')->rows());
 // ------------------------------------------------------------------------------------------------
-$WriteToFile=new ClickHouseDB\Query\WriteToFile('/tmp/_1_select.csv');
-$statement=$db->select('select * from summing_url_views',[],null,$WriteToFile);
+$WriteToFile = new ClickHouseDB\Query\WriteToFile('/tmp/_1_select.csv');
+$statement = $db->select('select * from summing_url_views', [], null, $WriteToFile);
 print_r($statement->info());
 
 //
-$db->selectAsync('select * from summing_url_views limit 4',[],null,new ClickHouseDB\Query\WriteToFile('/tmp/_2_select.csv'));
-$db->selectAsync('select * from summing_url_views limit 4',[],null,new ClickHouseDB\Query\WriteToFile('/tmp/_3_select.tab',true,'TabSeparatedWithNames'));
-$db->selectAsync('select * from summing_url_views limit 4',[],null,new ClickHouseDB\Query\WriteToFile('/tmp/_4_select.tab',true,'TabSeparated'));
-$statement=$db->selectAsync('select * from summing_url_views limit 54',[],null,new ClickHouseDB\Query\WriteToFile('/tmp/_5_select.csv',true,ClickHouseDB\Query\WriteToFile::FORMAT_CSV));
+$db->selectAsync('select * from summing_url_views limit 4', [], null, new ClickHouseDB\Query\WriteToFile('/tmp/_2_select.csv'));
+$db->selectAsync('select * from summing_url_views limit 4', [], null, new ClickHouseDB\Query\WriteToFile('/tmp/_3_select.tab', true, 'TabSeparatedWithNames'));
+$db->selectAsync('select * from summing_url_views limit 4', [], null, new ClickHouseDB\Query\WriteToFile('/tmp/_4_select.tab', true, 'TabSeparated'));
+$statement = $db->selectAsync('select * from summing_url_views limit 54', [], null, new ClickHouseDB\Query\WriteToFile('/tmp/_5_select.csv', true, ClickHouseDB\Query\WriteToFile::FORMAT_CSV));
 $db->executeAsync();
 
 print_r($statement->info());
@@ -68,11 +68,11 @@
 
 echo "TRY GZIP\n";
 
-$WriteToFile=new ClickHouseDB\Query\WriteToFile('/tmp/_0_select.csv.gz');
+$WriteToFile = new ClickHouseDB\Query\WriteToFile('/tmp/_0_select.csv.gz');
 $WriteToFile->setFormat(ClickHouseDB\Query\WriteToFile::FORMAT_TabSeparatedWithNames);
-$WriteToFile->setGzip(true);// cat /tmp/_0_select.csv.gz | gzip -dc > /tmp/w.result
+$WriteToFile->setGzip(true); // cat /tmp/_0_select.csv.gz | gzip -dc > /tmp/w.result
 
-$statement=$db->select('select * from summing_url_views',[],null,$WriteToFile);
+$statement = $db->select('select * from summing_url_views', [], null, $WriteToFile);
 print_r($statement->info());
 
 echo "OK!\n\n";
\ No newline at end of file
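
Note: the gzipped output can also be checked from PHP instead of the shell one-liner in the comment above. A minimal sketch using stock PHP only:

    // Inflate the result and show the first rows; the header line is included
    // because the format above is TabSeparatedWithNames
    $raw = gzdecode(file_get_contents('/tmp/_0_select.csv.gz'));
    echo substr($raw, 0, 200) . "\n";
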
@@ -9,7 +9,7 @@
 class progress {
     public static function printz($data)
     {
-        echo "CALL CLASS: ".json_encode($data)."\n";
+        echo "CALL CLASS: " . json_encode($data) . "\n";
     }
 }
 
@@ -22,14 +22,14 @@
 
 
 // ---------------------------------------- ----------------------------------------
-$db->progressFunction(function ($data) {
-    echo "CALL FUNCTION:".json_encode($data)."\n";
+$db->progressFunction(function($data) {
+    echo "CALL FUNCTION:" . json_encode($data) . "\n";
 });
-$st=$db->select('SELECT number,sleep(0.2) FROM system.numbers limit 5');
+$st = $db->select('SELECT number,sleep(0.2) FROM system.numbers limit 5');
 
 
 // ---------------------------------------- ----------------------------------------
 $db->settings()->set('http_headers_progress_interval_ms', 15); // change interval
 
-$db->progressFunction(['progress','printz']);
-$st=$db->select('SELECT number,sleep(0.1) FROM system.numbers limit 5');
\ No newline at end of file
+$db->progressFunction(['progress', 'printz']);
+$st = $db->select('SELECT number,sleep(0.1) FROM system.numbers limit 5');
\ No newline at end of file
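
Note: the $data handed to the progress callback is ClickHouse's HTTP progress payload decoded to an array; typical keys are read_rows, read_bytes and total_rows, though the exact key set depends on the server version. A sketch of a callback that uses it defensively:

    $db->progressFunction(function($data) {
        // Guard the key in case this server version omits it
        $read = $data['read_rows'] ?? '?';
        echo "progress: {$read} rows read\n";
    });
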
@@ -32,7 +32,7 @@
 
 // --------------------- $config['https']=true; --------------------------------
 
-$config['https']=true;
+$config['https'] = true;
 
 $db = new ClickHouseDB\Client($config);
 $db->verbose();
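
Note: for reference, a typical $config for these examples with the https flag in place; host, port and credentials are placeholder values:

    $config = [
        'host'     => '127.0.0.1',
        'port'     => '8123',
        'username' => 'default',
        'password' => '',
        'https'    => true, // the flag this hunk enables
    ];
    $db = new ClickHouseDB\Client($config);
    $db->ping(); // fails fast if TLS or the credentials are wrong
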
@@ -7,10 +7,10 @@
 
 class CustomDegeneration implements \ClickHouseDB\Query\Degeneration
 {
-    private $bindings=[];
+    private $bindings = [];
     public function bindParams(array $bindings)
     {
-        $this->bindings=$bindings;
+        $this->bindings = $bindings;
     }
     public function process($sql)
     {
@@ -18,10 +18,10 @@
         {
             foreach ($this->bindings as $key=>$value)
             {
-                $sql=str_ireplace('%'.$key.'%',$value,$sql);
+                $sql = str_ireplace('%' . $key . '%', $value, $sql);
             }
         }
-        return str_ireplace('XXXX','SELECT',$sql);
+        return str_ireplace('XXXX', 'SELECT', $sql);
     }
 }
 
@@ -45,4 +45,4 @@
 
 
 // SELECT 1 as ping
-print_r($db->select('XXXX 1 as %ZX%',['ZX'=>'ping'])->fetchOne());
+print_r($db->select('XXXX 1 as %ZX%', ['ZX'=>'ping'])->fetchOne());
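
Note: for the %ZX% replacement above to take effect, the custom Degeneration has to be registered on the client; the registration step is elided from this hunk, but per the library's README the hook looks like this:

    $db = new ClickHouseDB\Client($config);
    $db->addQueryDegeneration(new CustomDegeneration());
    // 'XXXX 1 as %ZX%' is rewritten to 'SELECT 1 as ping' before being sent
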
@@ -63,7 +63,7 @@
 $db->executeAsync();
 
 $stat = $db->insertBatchFiles('summing_url_views', ['/tmp/clickHouseDB_test.1.data'], [
-    'event_time', 'url_hash', 'site_id', 'views', 'v_00', 'v_55'
+    'event_time', 'url_hash', 'site_id', 'views', 'v_00', 'v_55'
 ]);
 
 $statselect1 = $db->selectAsync('SELECT * FROM summing_url_views LIMIT 1');
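
Note: a sketch of producing the data file consumed above with FormatLine::CSV (CSV being the format insertBatchFiles() defaults to in this library); the values are made up, and the column order mirrors the call:

    $file = '/tmp/clickHouseDB_test.1.data';
    @unlink($file);
    for ($i = 0; $i < 100; $i++) {
        // event_time, url_hash, site_id, views, v_00, v_55 (illustrative values)
        $row = [date('Y-m-d H:i:s'), 'url_' . $i, 55, $i, 0, 55];
        file_put_contents($file, \ClickHouseDB\Quote\FormatLine::CSV($row) . "\n", FILE_APPEND);
    }
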
@@ -8,11 +8,11 @@
 
 
 $db = new ClickHouseDB\Client($config);
-$db->settings()->set('replication_alter_partitions_sync',2);
-$db->settings()->set('experimental_allow_extended_storage_definition_syntax',1);
+$db->settings()->set('replication_alter_partitions_sync', 2);
+$db->settings()->set('experimental_allow_extended_storage_definition_syntax', 1);
 
 
-for ( $looop=1;$looop<100;$looop++)
+for ($looop = 1; $looop < 100; $looop++)
 {
 
     $db->write("DROP TABLE IF EXISTS testoperation_log");
@@ -26,29 +26,29 @@
 ");
 
     echo "INSERT DATA....\n";
-    for ($z=0;$z<1000;$z++)
+    for ($z = 0; $z < 1000; $z++)
     {
-        $dataInsert=['time'=>strtotime('-'.mt_rand(0,4000).' day'),'event'=>strval($z)];
+        $dataInsert = ['time'=>strtotime('-' . mt_rand(0, 4000) . ' day'), 'event'=>strval($z)];
         try {
-            $db->insertAssocBulk('testoperation_log',$dataInsert);
+            $db->insertAssocBulk('testoperation_log', $dataInsert);
            echo "$z\r";
         }
         catch (Exception $exception)
         {
-            die("Error:".$exception->getMessage());
+            die("Error:" . $exception->getMessage());
         }
 
     }
     echo "INSERT OK\n DROP PARTITION...\n";
 
-    $partitons=($db->partitions('testoperation_log'));
+    $partitons = ($db->partitions('testoperation_log'));
     foreach ($partitons as $part)
     {
-        echo "$looop\t\t".$part['partition']."\t".$part['name']."\t".$part['active']."\r";
+        echo "$looop\t\t" . $part['partition'] . "\t" . $part['name'] . "\t" . $part['active'] . "\r";
 
-        $db->dropPartition('default.testoperation_log',$part['partition']);
+        $db->dropPartition('default.testoperation_log', $part['partition']);
     }
-    echo "SELECT count() ...".str_repeat(" ",300)."\n";
+    echo "SELECT count() ..." . str_repeat(" ", 300) . "\n";
     print_r($db->select('SELECT count() FROM default.testoperation_log')->rows());
 }
 
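
Note: besides iterating partitions() and dropping each one as above, the library also ships an age-based cleanup helper (name per its README); a one-line sketch:

    // Drop partitions older than 30 days instead of dropping them one by one
    $db->dropOldPartitions('default.testoperation_log', 30);
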
@@ -11,12 +11,12 @@
 $cl->setScanTimeOut(2.5); // 2500 ms
 if (!$cl->isReplicasIsOk())
 {
-    throw new Exception('Replica state is bad , error='.$cl->getError());
+    throw new Exception('Replica state is bad, error=' . $cl->getError());
 }
 //
-$cluster_name='sharovara';
+$cluster_name = 'sharovara';
 //
-echo "> $cluster_name , count shard = ".$cl->getClusterCountShard($cluster_name)." ; count replica = ".$cl->getClusterCountReplica($cluster_name)."\n";
+echo "> $cluster_name, count shard = " . $cl->getClusterCountShard($cluster_name) . "; count replica = " . $cl->getClusterCountReplica($cluster_name) . "\n";
 
 
 
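
Note: after the health check, the Cluster object can hand out a client bound to a specific node; a sketch assuming the getClusterNodes() and client() methods as named in the library's README:

    $nodes = $cl->getClusterNodes($cluster_name); // host list for this cluster
    $db = $cl->client($nodes[0]);                 // ClickHouseDB\Client for one node
    print_r($db->select('SELECT 1')->rows());
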