milvus_benchmark.utils.restart_server()
Rating: F (last analyzed)

Complexity
    Conditions: 14

Size
    Total Lines: 73
    Code Lines: 63

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric  Value
cc      14     (cyclomatic complexity)
eloc    63     (effective lines of code)
nop     2      (number of parameters)
dl      0      (duplicated lines)
loc     73     (lines of code)
rs      3.6
c       0
b       0
f       0
How to fix

Long Method

Small methods make your code easier to understand, in particular when combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part into a new method, and to use the comment as a starting point for naming that new method.

Commonly applied refactorings include Extract Method; a minimal sketch follows.
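As a minimal, hypothetical illustration of Extract Method (the function names below are invented for the example, not taken from milvus_benchmark), the commented block in a long method becomes a named helper:

# Before: one long function with a commented section.
def report(orders):
    total = sum(o["price"] * o["qty"] for o in orders)
    # apply bulk discount
    if total > 1000:
        total *= 0.9
    return "Total: %.2f" % total

# After: the commented part is extracted, and the comment becomes the name.
def apply_bulk_discount(total):
    return total * 0.9 if total > 1000 else total

def report_refactored(orders):
    total = sum(o["price"] * o["qty"] for o in orders)
    return "Total: %.2f" % apply_bulk_discount(total)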

Complexity

Complex code like milvus_benchmark.utils.restart_server() often does a lot of different things. To break such a unit down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields or methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster. For a free function such as restart_server(), the analogous move is to extract cohesive groups of statements into helper functions, as sketched below.
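Applied to restart_server() in the listing below, one plausible decomposition is sketched here. Assumptions: v1 is a kubernetes.client.CoreV1Api instance, and the helper names _find_server_pod and _wait_pod_running are hypothetical, not part of the analyzed module.

import time

def _find_server_pod(v1, namespace, release_name):
    # Cohesive component 1: pod discovery (the "find the server pod" statements).
    for pod in v1.list_namespaced_pod(namespace).items:
        name = pod.metadata.name
        if release_name in name and "mysql" not in name:
            return name
    return None

def _wait_pod_running(v1, namespace, pod_name, timeout=120):
    # Cohesive component 2: polling the pod phase until Running or timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = v1.read_namespaced_pod_status(pod_name, namespace)
        if status.status.phase == "Running":
            return True
        time.sleep(1)
    return False

Each helper then carries a name that documents the step, and the remaining body of restart_server() reads as a short sequence of calls.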

# -*- coding: utf-8 -*-
import os
import sys
import pdb
import time
import json
import datetime
import argparse
import threading
import logging
import string
import random
# import multiprocessing
import numpy as np
# import psutil
import sklearn.preprocessing
import h5py
# import docker
from yaml import full_load, dump
import yaml
import tableprint as tp
from pprint import pprint


logger = logging.getLogger("milvus_benchmark.utils")

REGISTRY_URL = "registry.zilliz.com/milvus/engine"

class literal_str(str): pass

def change_style(style, representer):
    def new_representer(dumper, data):
        scalar = representer(dumper, data)
        scalar.style = style
        return scalar
    return new_representer

from yaml.representer import SafeRepresenter

# represent_str does handle some corner cases, so use that
# instead of calling represent_scalar directly
represent_literal_str = change_style('|', SafeRepresenter.represent_str)

yaml.add_representer(literal_str, represent_literal_str)


# Analysis note: this code seems to be duplicated elsewhere in the project.
def normalize(metric_type, X):
    """
    Normalize the vectors.

    If metric_type is "ip", normalize with sklearn.preprocessing.normalize (l2);
    for binary metrics, pack the bit vectors into bytes.
    """
    if metric_type == "ip":
        logger.info("Set normalize for metric_type: %s" % metric_type)
        X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
        X = X.tolist()
    elif metric_type in ["jaccard", "hamming", "sub", "super"]:
        tmp = []
        for _, item in enumerate(X):
            new_vector = bytes(np.packbits(item, axis=-1).tolist())
            tmp.append(new_vector)
        X = tmp
    return X


def get_unique_name(prefix=None):
    if prefix is None:
        prefix = "milvus-benchmark-test-"
    return prefix+"".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)).lower()


def get_current_time():
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())


def print_table(headers, columns, data):
    bodys = []
    for index, value in enumerate(columns):
        tmp = [value]
        tmp.extend(data[index])
        bodys.append(tmp)
    tp.table(bodys, headers)


def get_dataset(hdf5_file_path):
    if not os.path.exists(hdf5_file_path):
        raise Exception("%s not existed" % hdf5_file_path)
    dataset = h5py.File(hdf5_file_path)
    return dataset


def modify_config(k, v, type=None, file_path="conf/server_config.yaml", db_slave=None):
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    with open(file_path) as f:
        config_dict = full_load(f)
        f.close()
    if config_dict:
        if k.find("use_blas_threshold") != -1:
            config_dict['engine_config']['use_blas_threshold'] = int(v)
        elif k.find("use_gpu_threshold") != -1:
            config_dict['engine_config']['gpu_search_threshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            config_dict['cache_config']['cpu_cache_capacity'] = int(v)
        elif k.find("enable_gpu") != -1:
            config_dict['gpu_resource_config']['enable'] = v
        elif k.find("gpu_cache_capacity") != -1:
            config_dict['gpu_resource_config']['cache_capacity'] = int(v)
        elif k.find("index_build_device") != -1:
            config_dict['gpu_resource_config']['build_index_resources'] = v
        elif k.find("search_resources") != -1:
            config_dict['resource_config']['resources'] = v
        # if db_slave:
        #     config_dict['db_config']['db_slave_path'] = MULTI_DB_SLAVE_PATH
        with open(file_path, 'w') as f:
            dump(config_dict, f, default_flow_style=False)
        f.close()
    else:
        raise Exception('Load file:%s error' % file_path)


# update server_config.yaml
def update_server_config(file_path, server_config):
    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    with open(file_path) as f:
        values_dict = full_load(f)
        f.close()
        for k, v in server_config.items():
            if k.find("primary_path") != -1:
                values_dict["db_config"]["primary_path"] = v
            elif k.find("use_blas_threshold") != -1:
                values_dict['engine_config']['use_blas_threshold'] = int(v)
            elif k.find("gpu_search_threshold") != -1:
                values_dict['engine_config']['gpu_search_threshold'] = int(v)
            elif k.find("cpu_cache_capacity") != -1:
                values_dict['cache_config']['cpu_cache_capacity'] = int(v)
            elif k.find("cache_insert_data") != -1:
                values_dict['cache_config']['cache_insert_data'] = v
            elif k.find("enable") != -1:
                values_dict['gpu_resource_config']['enable'] = v
            elif k.find("gpu_cache_capacity") != -1:
                values_dict['gpu_resource_config']['cache_capacity'] = int(v)
            elif k.find("build_index_resources") != -1:
                values_dict['gpu_resource_config']['build_index_resources'] = v
            elif k.find("search_resources") != -1:
                values_dict['gpu_resource_config']['search_resources'] = v
            with open(file_path, 'w') as f:
                dump(values_dict, f, default_flow_style=False)
            f.close()


# update values.yaml
def update_values(file_path, deploy_mode, hostname, server_config):
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)

    if not os.path.isfile(file_path):
        raise Exception('File: %s not found' % file_path)
    # bak values.yaml
    file_name = os.path.basename(file_path)
    bak_file_name = file_name+".bak"
    file_parent_path = os.path.dirname(file_path)
    bak_file_path = file_parent_path+'/'+bak_file_name
    if os.path.exists(bak_file_path):
        os.system("cp %s %s" % (bak_file_path, file_path))
    else:
        os.system("cp %s %s" % (file_path, bak_file_path))
    with open(file_path) as f:
        values_dict = full_load(f)
        f.close()

    for k, v in server_config.items():
        if k.find("primary_path") != -1:
            suffix_path = server_config["suffix_path"] if "suffix_path" in server_config else None
            path_value = v
            if suffix_path:
                path_value = v + "_" + str(int(time.time()))
            values_dict["primaryPath"] = path_value
            values_dict['wal']['path'] = path_value+"/wal"
            values_dict['logs']['path'] = path_value+"/logs"
        # elif k.find("use_blas_threshold") != -1:
        #     values_dict['useBLASThreshold'] = int(v)
        elif k.find("gpu_search_threshold") != -1:
            values_dict['gpu']['gpuSearchThreshold'] = int(v)
        elif k.find("cpu_cache_capacity") != -1:
            values_dict['cache']['cacheSize'] = v
        # elif k.find("cache_insert_data") != -1:
        #     values_dict['cache']['cacheInsertData'] = v
        elif k.find("insert_buffer_size") != -1:
            values_dict['cache']['insertBufferSize'] = v
        elif k.find("gpu_resource_config.enable") != -1:
            values_dict['gpu']['enabled'] = v
        elif k.find("gpu_resource_config.cache_capacity") != -1:
            values_dict['gpu']['cacheSize'] = v
        elif k.find("build_index_resources") != -1:
            values_dict['gpu']['buildIndexDevices'] = v
        elif k.find("search_resources") != -1:
            values_dict['gpu']['searchDevices'] = v
        # wal
        elif k.find("auto_flush_interval") != -1:
            values_dict['storage']['autoFlushInterval'] = v
        elif k.find("wal_enable") != -1:
            values_dict['wal']['enabled'] = v

    # if values_dict['nodeSelector']:
    #     logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector']))
    #     return
    values_dict["wal"]["recoveryErrorIgnore"] = True
    # enable monitor
    values_dict["metrics"]["enabled"] = True
    values_dict["metrics"]["address"] = "192.168.1.237"
    values_dict["metrics"]["port"] = 9091
    # Using sqlite for single mode
    if deploy_mode == "single":
        values_dict["mysql"]["enabled"] = False
    # update values.yaml with the given host
    if hostname:
        config.load_kube_config()
        v1 = client.CoreV1Api()
        values_dict['nodeSelector'] = {'kubernetes.io/hostname': hostname}
        # node = v1.read_node(hostname)
        cpus = v1.read_node(hostname).status.allocatable.get("cpu")
        # set limit/request cpus in resources
        values_dict["image"]['resources'] = {
            "limits": {
                "cpu": str(int(cpus))+".0"
            },
            "requests": {
                "cpu": str(int(cpus)-1)+".0"
            }
        }
        values_dict['tolerations'] = [{
            "key": "worker",
            "operator": "Equal",
            "value": "performance",
            "effect": "NoSchedule"
        }]
    # add extra volumes
    values_dict['extraVolumes'] = [{
        'name': 'test',
        'flexVolume': {
            'driver': "fstab/cifs",
            'fsType': "cifs",
            'secretRef': {
                'name': "cifs-test-secret"
            },
            'options': {
                'networkPath': "//192.168.1.126/test",
                'mountOptions': "vers=1.0"
            }
        }
    }]
    values_dict['extraVolumeMounts'] = [{
        'name': 'test',
        'mountPath': '/test'
    }]
    # add extra volumes for mysql
    # values_dict['mysql']['persistence']['enabled'] = True
    # values_dict['mysql']['configurationFilesPath'] = "/etc/mysql/mysql.conf.d/"
    # values_dict['mysql']['imageTag'] = '5.6'
    # values_dict['mysql']['securityContext'] = {
    #         'enabled': True}
    # mysql_db_path = "/test"
    if deploy_mode == "shards":
        # mount_path = values_dict["primaryPath"]+'/data'
        # long_str = '- name: test-mysql\n  flexVolume:\n    driver: fstab/cifs\n    fsType: cifs\n    secretRef:\n      name: cifs-test-secret\n    options:\n      networkPath: //192.168.1.126/test\n      mountOptions: vers=1.0'
        # values_dict['mysql']['extraVolumes'] = literal_str(long_str)
        # long_str_2 = "- name: test-mysql\n  mountPath: %s" % mysql_db_path
        # values_dict['mysql']['extraVolumeMounts'] = literal_str(long_str_2)
        # mysql_cnf_str = '[mysqld]\npid-file=%s/mysql.pid\ndatadir=%s' % (mount_path, mount_path)
        # values_dict['mysql']['configurationFiles'] = {}
        # values_dict['mysql']['configurationFiles']['mysqld.cnf'] = literal_str(mysql_cnf_str)
        values_dict['mysql']['enabled'] = False
        values_dict['externalMysql']['enabled'] = True
        values_dict['externalMysql']["ip"] = "192.168.1.197"
        values_dict['externalMysql']["port"] = 3306
        values_dict['externalMysql']["user"] = "root"
        values_dict['externalMysql']["password"] = "Fantast1c"
        values_dict['externalMysql']["database"] = "db"
    logger.debug(values_dict)
    #  print(dump(values_dict))
    with open(file_path, 'w') as f:
        dump(values_dict, f, default_flow_style=False)
    f.close()
    # DEBUG
    with open(file_path) as f:
        for line in f.readlines():
            line = line.strip("\n")
            logger.debug(line)


def helm_install_server(helm_path, deploy_mode, image_tag, image_type, name, namespace):
    """Deploy server using helm.

    """
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)

    timeout = 300
    logger.debug("Server deploy mode: %s" % deploy_mode)
    host = "%s.%s.svc.cluster.local" % (name, namespace)
    if deploy_mode == "single":
        install_cmd = "helm install --wait --timeout %ds \
                --set image.repository=%s \
                --set image.tag=%s \
                --set image.pullPolicy=Always \
                --set service.type=ClusterIP \
                -f ci/filebeat/values.yaml \
                --namespace %s \
                %s ." % (timeout, REGISTRY_URL, image_tag, namespace, name)
    elif deploy_mode == "shards":
        install_cmd = "helm install --wait --timeout %ds \
                --set cluster.enabled=true \
                --set persistence.enabled=true \
                --set mishards.image.tag=test \
                --set mishards.image.pullPolicy=Always \
                --set image.repository=%s \
                --set image.tag=%s \
                --set image.pullPolicy=Always \
                --set service.type=ClusterIP \
                -f ci/filebeat/values.yaml \
                --namespace %s \
                %s ." % (timeout, REGISTRY_URL, image_tag, namespace, name)
    logger.debug(install_cmd)
    # Analysis note: the variable install_cmd does not seem to be defined for
    # all execution paths.
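    # Editor's sketch (not in the original): a defensive fix would initialize
    # install_cmd = None before the if/elif chain and raise a ValueError here
    # when it is still None, so an unexpected deploy_mode fails loudly instead
    # of triggering a NameError on the line above.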
    logger.debug(host)
    if os.system("cd %s && %s" % (helm_path, install_cmd)):
        logger.error("Helm install failed")
        return None
    time.sleep(5)
    # config.load_kube_config()
    # v1 = client.CoreV1Api()
    # pod_name = None
    # pod_id = None
    # pods = v1.list_namespaced_pod(namespace)
    # for i in pods.items:
    #     if i.metadata.name.find(name) != -1:
    #         pod_name = i.metadata.name
    #         pod_ip = i.status.pod_ip
    # logger.debug(pod_name)
    # logger.debug(pod_ip)
    # return pod_name, pod_ip
    return host


def helm_del_server(name, namespace):
    """
    Delete server using helm uninstall.

    Return whether the uninstall succeeded.
    """
    # logger.debug("Sleep 600s before uninstall server")
    # time.sleep(600)
    del_cmd = "helm uninstall -n milvus %s" % name
    logger.debug(del_cmd)
    if os.system(del_cmd):
        logger.error("Helm delete name:%s failed" % name)
        return False
    return True


def restart_server(helm_release_name, namespace):
    res = True
    timeout = 120
    from kubernetes import client, config
    client.rest.logger.setLevel(logging.WARNING)

    # service_name = "%s.%s.svc.cluster.local" % (helm_release_name, namespace)
    config.load_kube_config()
    v1 = client.CoreV1Api()
    pod_name = None
    # config_map_names = v1.list_namespaced_config_map(namespace, pretty='true')
    # body = {"replicas": 0}
    pods = v1.list_namespaced_pod(namespace)
    for i in pods.items:
        if i.metadata.name.find(helm_release_name) != -1 and i.metadata.name.find("mysql") == -1:
            pod_name = i.metadata.name
            break
            # v1.patch_namespaced_config_map(config_map_name, namespace, body, pretty='true')
    # status_res = v1.read_namespaced_service_status(helm_release_name, namespace, pretty='true')
    logger.debug("Pod name: %s" % pod_name)
    if pod_name is not None:
        try:
            v1.delete_namespaced_pod(pod_name, namespace)
        except Exception as e:
            logging.error(str(e))
            logging.error("Exception when calling CoreV1Api->delete_namespaced_pod")
            res = False
            return res
        logging.error("Sleep 10s after pod deleted")
        time.sleep(10)
        # check if restart successfully
        pods = v1.list_namespaced_pod(namespace)
        for i in pods.items:
            pod_name_tmp = i.metadata.name
            logging.error(pod_name_tmp)
            if pod_name_tmp == pod_name:
                continue
            elif pod_name_tmp.find(helm_release_name) == -1 or pod_name_tmp.find("mysql") != -1:
                continue
            else:
                status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
                logging.error(status_res.status.phase)
                start_time = time.time()
                while time.time() - start_time <= timeout:
                    logging.error(time.time())
                    status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
                    if status_res.status.phase == "Running":
                        logging.error("Running")
                        logging.error("Sleep after restart")
                        break
                    else:
                        time.sleep(1)
                if time.time() - start_time > timeout:
                    logging.error("Restart pod: %s timeout" % pod_name_tmp)
                    res = False
                    return res
    else:
        raise Exception("Pod: %s not found" % pod_name)
    follow = True
    pretty = True
    previous = True # bool | Return previous terminated container logs. Defaults to false. (optional)
    since_seconds = 56 # int | A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. (optional)
    timestamps = True # bool | If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. (optional)
    container = "milvus"
    try:
        api_response = v1.read_namespaced_pod_log(pod_name_tmp, namespace, container=container, follow=follow, pretty=pretty, previous=previous, since_seconds=since_seconds, timestamps=timestamps)
        # Analysis note: pod_name_tmp (used above) does not seem to be defined
        # when the for loop over pods.items above is never entered.
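        # Editor's sketch (not in the original): initializing pod_name_tmp = None
        # before the loop and skipping the log read while it is None would make
        # this path explicit instead of relying on the except below to catch a
        # NameError.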
        logging.error(api_response)
    except Exception as e:
        logging.error("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
        res = False
        return res
    time.sleep(30)
    return res


# def pull_image(image):
#     registry = image.split(":")[0]
#     image_tag = image.split(":")[1]
#     client = docker.APIClient(base_url='unix://var/run/docker.sock')
#     logger.info("Start pulling image: %s" % image)
#     return client.pull(registry, image_tag)


# def run_server(image, mem_limit=None, timeout=30, test_type="local", volume_name=None, db_slave=None):
#     import colors

#     client = docker.from_env()
#     # if mem_limit is None:
#     #     mem_limit = psutil.virtual_memory().available
#     # logger.info('Memory limit:', mem_limit)
#     # cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1)
#     # logger.info('Running on CPUs:', cpu_limit)
#     for dir_item in ['logs', 'db']:
#         try:
#             os.mkdir(os.path.abspath(dir_item))
#         except Exception as e:
#             pass

#     if test_type == "local":
#         volumes = {
#             os.path.abspath('conf'):
#                 {'bind': '/opt/milvus/conf', 'mode': 'ro'},
#             os.path.abspath('logs'):
#                 {'bind': '/opt/milvus/logs', 'mode': 'rw'},
#             os.path.abspath('db'):
#                 {'bind': '/opt/milvus/db', 'mode': 'rw'},
#         }
#     elif test_type == "remote":
#         if volume_name is None:
#             raise Exception("No volume name")
#         remote_log_dir = volume_name+'/logs'
#         remote_db_dir = volume_name+'/db'

#         for dir_item in [remote_log_dir, remote_db_dir]:
#             if not os.path.isdir(dir_item):
#                 os.makedirs(dir_item, exist_ok=True)
#         volumes = {
#             os.path.abspath('conf'):
#                 {'bind': '/opt/milvus/conf', 'mode': 'ro'},
#             remote_log_dir:
#                 {'bind': '/opt/milvus/logs', 'mode': 'rw'},
#             remote_db_dir:
#                 {'bind': '/opt/milvus/db', 'mode': 'rw'}
#         }
#         # add volumes
#         if db_slave and isinstance(db_slave, int):
#             for i in range(2, db_slave+1):
#                 remote_db_dir = volume_name+'/data'+str(i)
#                 if not os.path.isdir(remote_db_dir):
#                     os.makedirs(remote_db_dir, exist_ok=True)
#                 volumes[remote_db_dir] = {'bind': '/opt/milvus/data'+str(i), 'mode': 'rw'}

#     container = client.containers.run(
#         image,
#         volumes=volumes,
#         runtime="nvidia",
#         ports={'19530/tcp': 19530, '8080/tcp': 8080},
#         # environment=["OMP_NUM_THREADS=48"],
#         # cpuset_cpus=cpu_limit,
#         # mem_limit=mem_limit,
#         # environment=[""],
#         detach=True)

#     def stream_logs():
#         for line in container.logs(stream=True):
#             logger.info(colors.color(line.decode().rstrip(), fg='blue'))

#     if sys.version_info >= (3, 0):
#         t = threading.Thread(target=stream_logs, daemon=True)
#     else:
#         t = threading.Thread(target=stream_logs)
#         t.daemon = True
#     t.start()

#     logger.info('Container: %s started' % container)
#     return container
#     # exit_code = container.wait(timeout=timeout)
#     # # Exit if exit code
#     # if exit_code == 0:
#     #     return container
#     # elif exit_code is not None:
#     #     print(colors.color(container.logs().decode(), fg='red'))

# def restart_server(container):
#     client = docker.APIClient(base_url='unix://var/run/docker.sock')

#     client.restart(container.name)
#     logger.info('Container: %s restarted' % container.name)
#     return container


# def remove_container(container):
#     container.remove(force=True)
#     logger.info('Container: %s removed' % container)


# def remove_all_containers(image):
#     client = docker.from_env()
#     try:
#         for container in client.containers.list():
#             if image in container.image.tags:
#                 container.stop(timeout=30)
#                 container.remove(force=True)
#     except Exception as e:
#         logger.error("Containers removed failed")


# def container_exists(image):
#     '''
#     Check if container existed with the given image name
#     @params: image name
#     @return: container if exists
#     '''
#     res = False
#     client = docker.from_env()
#     for container in client.containers.list():
#         if image in container.image.tags:
#             # True
#             res = container
#     return res


if __name__ == '__main__':
    update_values("","shards",None,None)