@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class MangaHere extends Base_Site_Model {
 public $titleFormat = '/^[a-z0-9_]+$/';
@@ -15,7 +15,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -35,8 +35,8 @@
 
 $link = preg_replace('/^(.*\/)(?:[0-9]+\.html)?$/', '$1', (string) $data['nodes_chapter']->getAttribute('href'));
 $chapterURLSegments = explode('/', $link);
-$titleData['latest_chapter'] = $chapterURLSegments[5] . (isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
+$titleData['latest_chapter'] = $chapterURLSegments[5].(isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
 }
 
 return (!empty($titleData) ? $titleData : NULL);
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class KissManga extends Base_Site_Model {
 /* This site is a massive pain in the ass. The only reason I'm supporting it is it's one of the few aggregator sites which actually support more risqué manga.
@@ -30,7 +30,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 //Check if cookiejar is a day old (so we can know if something went wrong)
@@ -59,13 +59,13 @@
 $titleData['title'] = $nodes_title->item(0)->textContent;
 
 $firstRow = $nodes_row->item(0);
-$nodes_latest = $xpath->query("td[2]", $firstRow);
+$nodes_latest = $xpath->query("td[2]", $firstRow);
 $nodes_chapter = $xpath->query("td[1]/a", $firstRow);
 
 $link = (string) $nodes_chapter->item(0)->getAttribute('href');
 $chapterURLSegments = explode('/', preg_replace('/\?.*$/', '', $link));
-$titleData['latest_chapter'] = $chapterURLSegments[3] . ':--:' . preg_replace('/.*?([0-9]+)$/', '$1', $link);
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $nodes_latest->item(0)->textContent));
+$titleData['latest_chapter'] = $chapterURLSegments[3].':--:'.preg_replace('/.*?([0-9]+)$/', '$1', $link);
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $nodes_latest->item(0)->textContent));
 }
 } else {
 //TODO: Throw ERRORS;
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class Batoto extends Base_Site_Model {
 //Batoto is a bit tricky to track. Unlike MangaFox and MangaHere, it doesn't store anything in the title_url, which means we have to get the data via other methods.
@@ -24,12 +24,12 @@
 
 $chapter_parts = explode(':--:', $chapter);
 return [
-'url' => "https://bato.to/reader#" . $chapter_parts[0],
+'url' => "https://bato.to/reader#".$chapter_parts[0],
 'number' => $chapter_parts[1]
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $title_parts = explode(':--:', $title_url);
@@ -58,7 +58,7 @@
 $titleData['title'] = html_entity_decode(trim($data['nodes_title']->textContent));
 
 preg_match('/^(?:Vol\.(?<volume>\S+) )?(?:Ch.(?<chapter>[^\s:]+)(?:\s?-\s?(?<extra>[0-9]+))?):?.*/', trim($data['nodes_chapter']->nodeValue), $text);
-$titleData['latest_chapter'] = substr($data['nodes_chapter']->getAttribute('href'), 23) . ':--:' . ((!empty($text['volume']) ? 'v'.$text['volume'].'/' : '') . 'c'.$text['chapter'] . (!empty($text['extra']) ? '-'.$text['extra'] : ''));
+$titleData['latest_chapter'] = substr($data['nodes_chapter']->getAttribute('href'), 23).':--:'.((!empty($text['volume']) ? 'v'.$text['volume'].'/' : '').'c'.$text['chapter'].(!empty($text['extra']) ? '-'.$text['extra'] : ''));
 
 $dateString = $data['nodes_latest']->nodeValue;
 if($dateString == 'An hour ago') {
@@ -167,7 +167,7 @@
 
 $chapter = $nodes_chapter->item(0);
 preg_match('/^(?:Vol\.(?<volume>\S+) )?(?:Ch.(?<chapter>[^\s:]+)(?:\s?-\s?(?<extra>[0-9]+))?):?.*/', trim($chapter->nodeValue), $text);
-$titleData['latest_chapter'] = substr($chapter->getAttribute('href'), 8) . ':--:' . ((!empty($text['volume']) ? 'v' . $text['volume'] . '/' : '') . 'c' . $text['chapter'] . (!empty($text['extra']) ? '-' . $text['extra'] : ''));
+$titleData['latest_chapter'] = substr($chapter->getAttribute('href'), 8).':--:'.((!empty($text['volume']) ? 'v'.$text['volume'].'/' : '').'c'.$text['chapter'].(!empty($text['extra']) ? '-'.$text['extra'] : ''));
 
 $dateString = $nodes_latest->item(0)->nodeValue;
 if($dateString == 'An hour ago') {
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class MangaStream extends Base_Site_Model {
 public $titleFormat = '/^[a-z0-9_]+$/';
@@ -15,7 +15,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -35,7 +35,7 @@
 
 $titleData['latest_chapter'] = preg_replace('/^.*\/(.*?\/[0-9]+)\/[0-9]+$/', '$1', (string) $data['nodes_chapter']->getAttribute('href'));
 
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
 }
 
 return (!empty($titleData) ? $titleData : NULL);
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class EGScans extends Base_Site_Model {
 public $titleFormat = '/^[A-Za-z0-9\-_\!,]+$/';
@@ -15,7 +15,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class MangaPanda extends Base_Site_Model {
 //NOTE: MangaPanda has manga pages under the root URL, so we need to filter out pages we know that aren't manga.
@@ -16,7 +16,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -35,7 +35,7 @@
 
 $titleData['latest_chapter'] = preg_replace('/^.*\/([0-9]+)$/', '$1', (string) $data['nodes_chapter']->getAttribute('href'));
 
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
 }
 
 return (!empty($titleData) ? $titleData : NULL);
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class GameOfScanlation extends Base_Site_Model {
 public $titleFormat = '/^[a-z0-9\.-]+$/';
@@ -9,7 +9,7 @@
 The bad thing is these are interchangeable, despite them showing the exact same listing page.
 Thankfully the title_url of manga which use /forums/ seem to be appended with ".%ID%" which means we can easily check them. */
 
-if (strpos($title_url, '.') !== FALSE) {
+if(strpos($title_url, '.') !== FALSE) {
 $format = "https://gameofscanlation.moe/forums/{$title_url}/";
 } else {
 $format = "https://gameofscanlation.moe/projects/{$title_url}/";
@@ -24,7 +24,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -44,7 +44,7 @@
 
 $titleData['latest_chapter'] = preg_replace('/^projects\/.*?\/(.*?)\/$/', '$1', (string) $data['nodes_chapter']->getAttribute('href'));
 
-$titleData['last_updated'] = date("Y-m-d H:i:s",(int) $data['nodes_latest']->getAttribute('title'));
+$titleData['last_updated'] = date("Y-m-d H:i:s", (int) $data['nodes_latest']->getAttribute('title'));
 }
 
 return (!empty($titleData) ? $titleData : NULL);
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class MangaFox extends Base_Site_Model {
 public $titleFormat = '/^[a-z0-9_]+$/';
@@ -15,7 +15,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 $fullURL = $this->getFullTitleURL($title_url);
@@ -34,8 +34,8 @@
 
 $link = preg_replace('/^(.*\/)(?:[0-9]+\.html)?$/', '$1', (string) $data['nodes_chapter']->getAttribute('href'));
 $chapterURLSegments = explode('/', $link);
-$titleData['latest_chapter'] = $chapterURLSegments[5] . (isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
+$titleData['latest_chapter'] = $chapterURLSegments[5].(isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $data['nodes_latest']->nodeValue));
 
 if($firstGet) {
 $titleData = array_merge($titleData, $this->doCustomFollow($content['body']));
@@ -114,9 +114,9 @@
 
 $link = preg_replace('/^(.*\/)(?:[0-9]+\.html)?$/', '$1', (string) $nodes_chapter->item(0)->getAttribute('href'));
 $chapterURLSegments = explode('/', $link);
-$titleData['latest_chapter'] = $chapterURLSegments[5] . (isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
+$titleData['latest_chapter'] = $chapterURLSegments[5].(isset($chapterURLSegments[6]) && !empty($chapterURLSegments[6]) ? "/{$chapterURLSegments[6]}" : "");
 
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $nodes_latest->item(0)->nodeValue));
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $nodes_latest->item(0)->nodeValue));
 
 $title_url = explode('/', $title->getAttribute('href'))[4];
 $titleDataList[$title_url] = $titleData;
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1); defined('BASEPATH') OR exit('No direct script access allowed');
+<?php declare(strict_types = 1); defined('BASEPATH') OR exit('No direct script access allowed');
 
 class WebToons extends Base_Site_Model {
 /* Webtoons.com has a very weird and pointless URL format.
@@ -34,7 +34,7 @@
 ];
 }
 
-public function getTitleData(string $title_url, bool $firstGet = FALSE) : ?array {
+public function getTitleData(string $title_url, bool $firstGet = FALSE) : ? array {
 $titleData = [];
 
 //FIXME: We don't use parseTitleDOM here due to using rss. Should probably have an alternate method for XML parsing.
@@ -51,8 +51,8 @@
 $titleData['title'] = trim((string) $xml->{'channel'}->title);
 
 $chapterURLSegments = explode('/', ((string) $xml->{'channel'}->item[0]->link));
-$titleData['latest_chapter'] = preg_replace('/^.*?([0-9]+)$/', '$1', $chapterURLSegments[7]) . ':--:' . $chapterURLSegments[6];
-$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $xml->{'channel'}->item[0]->pubDate));
+$titleData['latest_chapter'] = preg_replace('/^.*?([0-9]+)$/', '$1', $chapterURLSegments[7]).':--:'.$chapterURLSegments[6];
+$titleData['last_updated'] = date("Y-m-d H:i:s", strtotime((string) $xml->{'channel'}->item[0]->pubDate));
 
 if($firstGet) {
 $titleData = array_merge($titleData, $this->doCustomFollow($content['body'], ['id' => $title_parts[0]]));