%global _empty_manifest_terminate_build 0
Name:		python-akerbp.mlops
Version:	2.5.8
Release:	1
Summary:	MLOps framework
License:	MIT License
URL:		https://bitbucket.org/akerbp/akerbp.mlops/
Source0:	https://mirrors.nju.edu.cn/pypi/web/packages/21/ea/9aea1932b2426dbe4cbbb66b3e054b9e4edc5c7bd57360fa71a25e200710/akerbp.mlops-2.5.8.tar.gz
BuildArch:	noarch

Requires:	python3-cognite-sdk[pandas]
Requires:	python3-pytest
Requires:	python3-pydantic
Requires:	python3-PyYAML

%description
```yaml
model_name: model2
human_friendly_model: 'My Second Model'
model_file: model_code/model2.py
(...)
```
## Files and Folders Structure
All the model code and files should be under a single folder, e.g. `model_code`.
**Required** files in this folder:
- `model.py`: implements the standard model interface
- `test_model.py`: tests to verify that the model code is correct and to verify
  correct deployment
- `requirements.model`: libraries needed (with specific **version numbers**),
  can't be called `requirements.txt`. Add the MLOps framework like this:
  ```bash
  # requirements.model
  (...) # your other reqs
  akerbp.mlops==MLOPS_VERSION
  ```
  During deployment, `MLOPS_VERSION` will be automatically replaced by the
  specific version **that you have installed locally**. Make sure you have the latest release on your local machine prior to model deployment.
For the prediction service we require the model interface to have the following functions and class (see the sketch after this list):
  - initialization(), with required arguments
    - path to artifact folder
    - secrets
      - these arguments can safely be set to None, and the framework will handle everything under the hood.
      - only set the path to the artifact folder to None if not using any artifacts
  - predict(), with required arguments
    - data
    - init_object (output from the initialization() function)
    - secrets
      - you can safely set the secrets argument to None, and the framework will handle the secrets under the hood.
  - a ModelException class that inherits from an Exception base class
For the training service we require the model interface to have the following function and class:
  - train(), with required arguments
    - folder_path
      - path to store model artifacts to be consumed by the prediction service
  - a ModelException class that inherits from an Exception base class
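A minimal sketch of a `model.py` following this interface is shown below. The function bodies, the artifact file name and the exact shape of the return values are assumptions made for illustration, not part of the framework:
```python
# model_code/model.py -- minimal sketch of the interface described above
import os
import pickle


class ModelException(Exception):
    """Raised when initialization, prediction or training fails."""


def initialization(artifact_folder, secrets):
    # Load artifacts once; both arguments may be None (see the notes above).
    if artifact_folder is None:
        return None
    with open(os.path.join(artifact_folder, "model.pkl"), "rb") as f:  # assumed artifact file name
        return pickle.load(f)


def predict(data, init_object, secrets):
    # 'data' is the dictionary passed to the prediction service.
    try:
        return {"prediction": init_object.predict(data["data"])}  # assumed return format
    except Exception as error:
        raise ModelException(str(error)) from error


def train(folder_path, secrets):
    # Fit the model, write artifacts to folder_path for the prediction
    # service to consume, and return artifact metadata as a dictionary.
    ...
```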
The following structure is recommended for projects with multiple models:
- `model_code/model1/`
- `model_code/model2/`
- `model_code/common_code/`
This is because when deploying a model, e.g. `model1`, the top folder in the
path (`model_code` in the example above) is copied and deployed, so the
`common_code` folder (assumed to be needed by `model1`) is included. Note that
the `model2` folder would also be deployed (this is assumed to be unnecessary but
harmless).
## Import Guidelines
The repo's root folder is the base folder when importing. For example, assume
you have these files in the folder with model code:
 - `model_code/model.py`
 - `model_code/helper.py`
 - `model_code/data.csv`
If `model.py` needs to import `helper.py`, use: `import model_code.helper`. If
`model.py` needs to read `data.csv`, the right path is
`os.path.join('model_code', 'data.csv')`.
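For instance, inside `model_code/model.py` the import and the file read could look like this (pandas is used here only for illustration):
```python
# model_code/model.py
import os

import pandas as pd

import model_code.helper  # imports resolve from the repo's root folder

data = pd.read_csv(os.path.join("model_code", "data.csv"))
```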
It's of course possible to import from the MLOps package, e.g. its logger:
``` python
from akerbp.mlops.core import logger
logging = logger.get_logger("logger_name")
logging.debug("This is a debug log")
```
## Services
We consider two types of services: prediction and training.
Deployed services can be called with
```python
from akerbp.mlops.xx.helpers import call_function
output = call_function(external_id, data)
```
Where `xx` is either `'cdf'` or `'gc'`, and `external_id` follows the
structure `model-service-env`:
 - `model`: model name given by the user (settings file)
 - `service`: either `training` or `prediction`
 - `env`: either `dev`, `test` or `prod` (depending on the deployment
   environment)
The output has a status field (`ok` or `error`). If the status is `ok`, the output
also has a `prediction` and `prediction_file` field, or a `training` field (depending on the type of service). The
former is determined by the `predict` method of the model, while the latter
combines artifact metadata and model metadata produced by the `train` function.
Prediction services also have a `model_id` field to keep track of which model
was used to predict.
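A minimal sketch of how a caller might branch on these fields (assuming the status field is literally named `status`; adapt the keys to your service type):
```python
output = call_function(external_id, data)
if output["status"] == "ok":
    prediction = output["prediction"]                 # prediction service output
    prediction_file = output.get("prediction_file")   # reference to stored predictions, if any
else:
    raise RuntimeError(f"Call to {external_id} returned an error")
```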
See below for more details on how to call prediction services hosted in CDF.
## Deployment Platform
Model services (described below) are deployed to CDF, i.e. Cognite Data Fusion.
CDF Functions include metadata when they are called. This information can be
used to redeploy a function (specifically, the `file_id` field). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
human_readable_name = "My model"
external_id = "my_model-prediction-test"
cdf.set_up_cdf_client('deploy')
cdf.redeploy_function(
  human_readable_name,
  external_id,
  file_id,  # taken from the metadata of a previous call to the function (see above)
  'Description',
  'your@email.com'
)
```
Note that the external id of a function needs to be unique, as it is used to distinguish functions across services and hosting environments.
It's possible to query available functions (can be filtered by environment
and/or tags). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
all_functions = cdf.list_functions()
test_functions = cdf.list_functions(env="test")
tag_functions = cdf.list_functions(tags=["well_interpretation"])
```
Functions can be deleted. Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
cdf.delete_service("my_model-prediction-test")
```
Functions can be called in parallel. Example:
```python
from akerbp.mlops.cdf.helpers import call_function_parallel
function_name = 'my_function-prediction-prod'
data = [dict(data='data_call_1'), dict(data='data_call_2')]
response1, response2 = call_function_parallel(function_name, data)
```
## Model Manager
Model Manager is the module dedicated to managing the model artifacts used by
prediction services (and generated by training services). This module uses CDF
Files as backend.
Model artifacts are versioned and stored together with user-defined metadata.
Uploading a new model increases the version count by 1 for that model and
environment. When deploying a prediction service, the latest model version is
chosen. It would be possible to extend the framework to allow deploying specific
versions or filtering by metadata.
Model artifacts are segregated by environment (e.g. only production artifacts
can be deployed to production). Model artifacts have to be uploaded manually to
test (or dev) environment before deployment. Code example:
```python
import akerbp.mlops.model_manager as mm
metadata = train(model_dir, secrets) # or define it directly
mm.setup()
folder_info = mm.upload_new_model_version(
  model_name,
  env,
  folder_path,
  metadata
)
```
If there are multiple models, you need to do this one at a time. Note that
`model_name` corresponds to one of the elements in `model_names` defined in
`mlops_settings.yaml`, `env` is the target environment (where the model should be
available), `folder_path` is the local model artifact folder and `metadata` is a
dictionary with artifact metadata, e.g. performance, git commit, etc.
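For example, the metadata could be defined directly as a small dictionary; the keys below are purely illustrative:
```python
metadata = {
    "performance_r2": "0.93",   # illustrative performance metric
    "git_commit": "abc1234",    # illustrative commit hash
}
```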
Model artifacts need to be promoted to the production environment (i.e. after
they have been deployed successfully to the test environment) so that a prediction
service can be deployed in production.
```python
# After a model's version has been successfully deployed to test
import akerbp.mlops.model_manager as mm
mm.setup()
mm.promote_model('model', 'version')
```
### Versioning
Each model artifact upload/promotion increments a version number (environment dependent)
available in Model Manager. However, this doesn't modify the model
artifacts used in existing prediction services (i.e. nothing changes in CDF
Functions). To reflect newly uploaded/promoted model artifacts in existing services, one needs to deploy the services again. Note that we don't have to specify the artifact version explicitly if we want to deploy using the latest artifacts, as this is done by default.
Recommended process to update a model artifact and prediction service:
1. New model features implemented in a feature branch
2. New artifact generated and uploaded to test environment
3. Feature branch merged with master
4. Test deployment is triggered automatically: prediction service is deployed to
   test environment with the latest artifact version (in test)
5. Prediction service in test is verified
6. Artifact version is promoted manually from command line whenever suitable
7. Production deployment is triggered manually from Bitbucket: prediction
   service is deployed to production with the latest artifact version (in prod)
It's possible to get an overview of the model artifacts managed by Model
Manager. Some examples (see `get_model_version_overview` documentation for other
possible queries):
```python
import akerbp.mlops.model_manager as mm
mm.setup()
# all artifacts
folder_info = mm.get_model_version_overview()
# all artifacts for a given model
folder_info = mm.get_model_version_overview(model_name='xx')
```
If the overview shows model artifacts that are not needed, it is possible to
remove them. For example if artifact "my_model/dev/5" is not needed:
```python
model_to_remove = "my_model/dev/5"
mm.delete_model_version(model_to_remove)
```
Model Manager will by default show information on the artifact to delete and ask
for user confirmation before proceeding. It's possible (but not recommended) to
disable this check. There's no identity check, so it's possible to delete any
model artifact (including those from other data scientists). Be careful!
It's possible to download a model artifact (e.g. to verify its content). For
example:
```python
mm.download_model_version('model_name', 'test', 'artifact_folder', version=5)
```
If no version is specified, the latest one is downloaded by default.
By default, Model Manager assumes artifacts are stored in the `mlops` dataset.
If your project uses a different one, you need to specify it during setup (see
the `setup` function).
Further information:
- Model Manager requires `COGNITE_API_KEY_*` environment variables (see next
  section) or a suitable key passed to the `setup` function.
- In projects with a training service, you can rely on it to upload a first
  version of the model. The first prediction service deployment will fail, but
  you can deploy again after the training service has produced a model.
- When you deploy from the development environment (covered later in this
  document), the model artifacts in the settings file can point to existing
  local folders. These will then be used for the deployment. Version is then
  fixed to `model_name/dev/1`. Note that these artifacts are not uploaded to CDF
  Files.
- Prediction services are deployed with model artifacts (i.e. the artifact is
  copied to the project file used to create the CDF Function) so that they are
  available at prediction time. Downloading artifacts at run time would add
  waiting time, and files written during run time consume RAM.
## Model versioning
To allow for model versioning and rolling back to previous model deployments, the external id of the functions (in CDF) includes a version number that reflects the latest artifact version number at the time the function is deployed (see above).
Every time we upload/promote new model artifacts and deploy our services, the version number in the external id of the functions representing the services is incremented (just as the version number of the artifacts).
To distinguish the latest model from the remaining model versions, we redeploy the latest model version using a predictable external id that does not contain the version number. By doing so we relieve clients of the need to deal with version numbers, and they will call the latest model by default. For every new deployment we thus have two model deployments: one with the version number and one without it in the external id. However, the predictable external id is persisted across new model versions, so when deploying a new version the latest one, with the predictable external id, is simply overwritten.
We are thus concerned with two structures for the external id:
- ```<model_name>-<service>-<env>-<version>``` for rolling back to previous versions, and
- ```<model_name>-<service>-<env>``` for the latest deployed model
For the latest model with a predictable external id, we tag the description of the model to specify that the model is in fact the latest version, and add the version number to the function metadata.
We can now list out multiple models with the same model name and external id prefix, and choose to make predictions and do inference with a specific model version. An example is shown below.
```python
# List all prediction services (i.e. models) named "My Model" hosted in the test
# environment, and pick the model corresponding to the first element of the list
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key"))  # client_name is arbitrary
client = CogniteClient(config=cnf)
my_models = client.functions.list(name="My Model", external_id_prefix="mymodel-prediction-test")
my_model_specific_version = my_models[0]
```
## Calling a deployed model prediction service hosted in CDF
This section describes how you can call deployed models and obtain predictions for doing inference.
We have two options for calling a function in CDF: either using the MLOps framework directly, or using the Cognite SDK. Independent of how you call your model, you have to pass the data as a dictionary with a key "data" containing a dictionary with your data, where the keys of the inner dictionary specify the columns and the values are lists of samples for the corresponding columns.
First, load your data and transform it into a dictionary as assumed by the framework. Note that the data dictionary you pass to the function might vary based on your model interface. Make sure to align with what you specified in your `model.py` interface.
```python
import pandas as pd
data = pd.read_csv("path_to_data")
input_data = data.drop(columns=target_variables)  # target_variables: list of target column names
data_dict = {"data": input_data.to_dict(orient="list"), "to_file": True}
```
The "to_file" key of the input data dictionary specifies how the predictions can be extracted downstream. More details are provided below
Calling a deployed model using MLOps:
1. Set up a cognite client with sufficient access rights
2. Extract the response directly by specifying the external id of the model and passing your data as a dictionary
    - Note that the external id is of the form
      - ```"<model_name>-<service>-<env>-<version>"```, and
      - ```"<model_name>-<service>-<env>"```
Use the latter external id if you want to call the latest model. The former can be used if you want to call a previous version of your model.
```python
from akerbp.mlops.cdf.helpers import set_up_cdf_client, call_function
set_up_cdf_client(context="deploy") #access CDF data, files and functions with deploy context
response = call_function(function_name="<model_name>-prediction-<env>", data=data_dict)
```
Calling a deployed model using the Cognite SDK:
1. Set up a cognite client with sufficient access rights
2. Retrieve the model from CDF by specifying the external id of the model
3. Call the function
4. Extract the response from the function call
```python
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key")) # pass an arbitrary client_name
client = CogniteClient(config=cnf)
function = client.functions.retrieve(external_id="<model_name>-prediction-<env>")
function_call = function.call(data=data_dict)
response = function_call.get_response()
```
Depending on how you specified the input dictionary, the predictions are either available directly from the response or need to be extracted from Cognite Files.
If the input data dictionary contains a key "to_file" with value True, the predictions are uploaded to Cognite Files, and the 'prediction_file' field in the response will contain a reference to the file containing the predictions. If "to_file" is set to False, or if the input dictionary does not contain such a key-value pair, the predictions are directly available through the function call response.
If "to_file" is True, we can extract the predictions using the following code snippet:
```python
import json

file_id = response["prediction_file"]
bytes_data = client.files.download_bytes(external_id=file_id)
predictions_df = pd.DataFrame.from_dict(json.loads(bytes_data))
```
Otherwise, the predictions are directly accessible from the response as follows.
```python
predictions = response["predictions"]
```
## Extracting metadata from deployed model in CDF
Once a model is deployed, a user can extract potentially valuable metadata as follows.
```python
my_function = client.functions.retrieve(external_id="my_model-prediction-test")
metadata = my_function.metadata
```
The metadata corresponds to whatever you specified in the `mlops_settings.yaml` file. For this example we get the following metadata:
```
{'cat_filler': 'UNKNOWN',
 'imputed': 'True',
 'input_types': '[int, float, string]',
 'num_filler': '-999.15',
 'output_curves': '[AC]',
 'output_unit': '[s/ft]',
 'petrel_exposure': 'False',
 'required_input': '[ACS, RDEP, DEN]',
 'training_wells': '[3/1-4]',
 'units': '[s/ft, 1, kg/m3]'}
```
## Local Testing and Deployment
It's possible to test the functions locally, which can help you debug errors
quickly. This is recommended before a deployment.
Define the following environment variables (e.g. in `.bashrc`):
```bash
export ENV=dev
export COGNITE_API_KEY_PERSONAL=xxx
export COGNITE_API_KEY_FUNCTIONS=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_DATA=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_FILES=$COGNITE_API_KEY_PERSONAL
```
From your repo's root folder:
- `python -m pytest model_code` (replace `model_code` by your model code folder
  name)
- `deploy_prediction_service.sh`
- `deploy_training_service.sh` (if there's a training service)
The first one will run your model tests. The last two run model tests but also
the service tests implemented in the framework and simulate deployment.
If you really want to deploy from your development environment, you can run
this: `LOCAL_DEPLOYMENT=True deploy_prediction_service.sh`
Note that, in case of emergency, it's possible to deploy to test or production
from your local environment, e.g.: `LOCAL_DEPLOYMENT=True ENV=test deploy_prediction_service.sh`
If you only want to run tests, set `TESTING_ONLY=True` before calling the deployment script.
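For example (using the scripts and variables described above):
```bash
# run the model and service tests only, without deploying
TESTING_ONLY=True bash deploy_prediction_service.sh

# deploy to the test environment from a development machine
LOCAL_DEPLOYMENT=True ENV=test bash deploy_prediction_service.sh
```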
## Automated Deployments from Bitbucket
Deployments to the test environment are triggered by commits (you need to push
them). Deployments to the production environment are enabled manually from the
Bitbucket pipeline dashboard. Branches that match `deploy/*` behave as master.
Branches that match `feature/*` run tests only (i.e. do not deploy).
It is assumed that most projects won't include a training service. A branch that
matches `mlops/*` deploys both prediction and training services. If a project
includes both services, the pipeline file could instead be edited so that master
deploys both services.
It is possible to schedule the training service in CDF, and then it can make
sense to schedule the deployment pipeline of the model service (as often as new
models are trained)
NOTE: Previous versions of akerbp.mlops assumed that calling
`LOCAL_DEPLOYMENT=True bash deploy_prediction_service.sh` would run tests without deploying models.
The package is now refactored to only trigger tests when the environment variable
`TESTING_ONLY` is set to `True`, and to allow local deployment when setting `LOCAL_DEPLOYMENT=True`.
Make sure to update the pipeline definition for branches with prefix `feature/` to call
`TESTING_ONLY=True bash deploy_prediction_service.sh` instead.
## Bitbucket Setup
The following environments need to be defined in `repository settings >
deployments`:
- test deployments: `test-prediction` and `test-training`, each with `ENV=test`
- production deployments: `production-prediction` and `production-training`,
  each with `ENV=prod`
The following need to be defined in `repository settings > repository
variables`: `COGNITE_API_KEY_DATA`, `COGNITE_API_KEY_FUNCTIONS`,
`COGNITE_API_KEY_FILES` (these should be CDF keys with access to data, functions
and files).
The pipeline needs to be enabled.
# Developer/Admin Guide
## Package versioning
The versioning of the package follows [Semantic Versioning 2.0.0](https://semver.org/), using the `MAJOR.MINOR.PATCH` structure. We are thus updating the package version using the following convention
1. Increment MAJOR when making incompatible API changes
2. Increment MINOR when adding backwards compatible functionality
3. Increment PATCH when making backwards compatible bug-fixes
The version is updated based on the latest commit to the repo, and we are currently using the following rules.
- The MAJOR version is incremented if the commit message includes the word `major`
- The MINOR version is incremented if the commit message includes the word `minor`
- The PATCH number is incremented if neither `major` nor `minor` is found in the commit message
- If the commit message includes the phrase `pre-release`, the package version is extended with `-alpha`, thus taking the form `MAJOR.MINOR.PATCH-alpha`.
Note that the above keywords are **not** case sensitive. Moreover, `major` takes precedence over `minor`, so if both keywords are found in the commit message, the MAJOR version is incremented and the MINOR version is kept unchanged.
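As an illustration of these rules (version numbers are hypothetical):
```
2.5.8 + commit message containing "minor"              -> 2.6.0
2.5.8 + commit message containing "major"              -> 3.0.0
2.5.8 + commit message containing both keywords        -> 3.0.0
2.5.8 + commit message with neither keyword            -> 2.5.9
any of the above + "pre-release" in the commit message -> same version, extended with "-alpha"
```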
In the dev and test environments, we release the package using the pre-release tag, and the package version takes the form `MAJOR.MINOR.PATCH-alpha`.
The version number is tracked in the `version.txt` file. Because this file is updated during the pipeline build when releasing the package to PyPI (both in test and prod), we have to pull from git every time a new version is released.
## MLOps Files and Folders
These are the files and folders in the MLOps repo:
- `src` contains the MLOps framework package
- `mlops_settings.yaml` contains the user settings for the dummy model
- `model_code` is a model template included to show the model interface. It is
  not needed by the framework, but it is recommended to become familiar with it.
- `model_artifact` stores the artifacts for the model shown in  `model_code`.
  This is to help to test the model and learn the framework.
- `bitbucket-pipelines.yml` describes the deployment pipeline in Bitbucket
- `build.sh` is the script to build and upload the package
- `setup.py` is used to build the package
- `version.txt` keeps track of the package version number
- `LICENSE` is the package's license
## CDF Datasets
In order to control access to the artifacts:
1. Set up a CDF Dataset with `write_protected=True` and an `external_id`, which
   by default is expected to be `mlops`.
2. Create a group of owners (CDF Dashboard), i.e. those that should have write
   access.
## Build and Upload Package
Create an account on PyPI, then create a token and a `$HOME/.pypirc` file. Edit
the `setup.py` file and note the following:
- Dependencies need to be registered
- Bash scripts will be installed in a `bin` folder in the `PATH`.
The pipeline is set up to build the library from Bitbucket, but it's possible to
build and upload the library from the development environment as well:
```bash
bash build.sh
```
In general this is required before `LOCAL_DEPLOYMENT=True bash
deploy_xxx_service.sh`. The exception is if local changes affect only the
deployment part of the library, and the library has been installed in developer
mode with:
```bash
pip install -e .
```
In this mode, the installed package links to the source code, so that it can be
modified without the need to reinstall.
## Bitbucket Setup
In addition to the user setup, the following is needed to build the package:
- `test-pypi`: `ENV=test`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi)
- `prod-pypi`: `ENV=prod`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi, can be the same as above)
## Notes on the code
Service testing happens in an independent process (subprocess library) to avoid
setup problems:
 - When deploying multiple models, the service had to be reloaded before testing
   it; otherwise it would still be the first model's service. Model initialization in
   the prediction service is designed to load artifacts only once per process.
 - If the model and the MLOps framework rely on different versions of the same
   library, the version would be changed during runtime, but the
   upgraded/downgraded version would not be available to the current process.

%package -n python3-akerbp.mlops
Summary:	MLOps framework
Provides:	python-akerbp.mlops
BuildRequires:	python3-devel
BuildRequires:	python3-setuptools
BuildRequires:	python3-pip
%description -n python3-akerbp.mlops
```yaml
model_name: model2
human_friendly_model: 'My Second Model'
model_file: model_code/model2.py
(...)
```
## Files and Folders Structure
All the model code and files should be under a single folder, e.g. `model_code`.
**Required** files in this folder:
- `model.py`: implements the standard model interface
- `test_model.py`: tests to verify that the model code is correct and to verify
  correct deployment
- `requirements.model`: libraries needed (with specific **version numbers**),
  can't be called `requirements.txt`. Add the MLOps framework like this:
  ```bash
  # requirements.model
  (...) # your other reqs
  akerbp.mlops==MLOPS_VERSION
  ```
  During deployment, `MLOPS_VERSION` will be automatically replaced by the
  specific version **that you have installed locally**. Make sure you have the latest release on your local machine prior to model deployment.
For the prediction service we require the model interface to have the following functions and class:
  - initialization(), with required arguments
    - path to artifact folder
    - secrets
      - these arguments can safely be set to None, and the framework will handle everything under the hood.
      - only set the path to the artifact folder to None if not using any artifacts
  - predict(), with required arguments
    - data
    - init_object (output from the initialization() function)
    - secrets
      - you can safely set the secrets argument to None, and the framework will handle the secrets under the hood.
  - a ModelException class that inherits from an Exception base class
For the training service we require the model interface to have the following function and class:
  - train(), with required arguments
    - folder_path
      - path to store model artifacts to be consumed by the prediction service
  - a ModelException class that inherits from an Exception base class
The following structure is recommended for projects with multiple models:
- `model_code/model1/`
- `model_code/model2/`
- `model_code/common_code/`
This is because when deploying a model, e.g. `model1`, the top folder in the
path (`model_code` in the example above) is copied and deployed, so the
`common_code` folder (assumed to be needed by `model1`) is included. Note that
the `model2` folder would also be deployed (this is assumed to be unnecessary but
harmless).
## Import Guidelines
The repo's root folder is the base folder when importing. For example, assume
you have these files in the folder with model code:
 - `model_code/model.py`
 - `model_code/helper.py`
 - `model_code/data.csv`
If `model.py` needs to import `helper.py`, use: `import model_code.helper`. If
`model.py` needs to read `data.csv`, the right path is
`os.path.join('model_code', 'data.csv')`.
It's of course possible to import from the MLOps package, e.g. its logger:
``` python
from akerbp.mlops.core import logger
logging = logger.get_logger("logger_name")
logging.debug("This is a debug log")
```
## Services
We consider two types of services: prediction and training.
Deployed services can be called with
```python
from akerbp.mlops.xx.helpers import call_function
output = call_function(external_id, data)
```
Where `xx` is either `'cdf'` or `'gc'`, and `external_id` follows the
structure `model-service-env`:
 - `model`: model name given by the user (settings file)
 - `service`: either `training` or `prediction`
 - `env`: either `dev`, `test` or `prod` (depending on the deployment
   environment)
The output has a status field (`ok` or `error`). If the status is `ok`, the output
also has a `prediction` and `prediction_file` field, or a `training` field (depending on the type of service). The
former is determined by the `predict` method of the model, while the latter
combines artifact metadata and model metadata produced by the `train` function.
Prediction services also have a `model_id` field to keep track of which model
was used to predict.
See below for more details on how to call prediction services hosted in CDF.
## Deployment Platform
Model services (described below) are deployed to CDF, i.e. Cognite Data Fusion.
CDF Functions include metadata when they are called. This information can be
used to redeploy a function (specifically, the `file_id` field). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
human_readable_name = "My model"
external_id = "my_model-prediction-test"
cdf.set_up_cdf_client('deploy')
cdf.redeploy_function(
  human_readable_name,
  external_id,
  file_id,  # taken from the metadata of a previous call to the function (see above)
  'Description',
  'your@email.com'
)
```
Note that the external id of a function needs to be unique, as it is used to distinguish functions across services and hosting environments.
It's possible to query available functions (can be filtered by environment
and/or tags). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
all_functions = cdf.list_functions()
test_functions = cdf.list_functions(env="test")
tag_functions = cdf.list_functions(tags=["well_interpretation"])
```
Functions can be deleted. Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
cdf.delete_service("my_model-prediction-test")
```
Functions can be called in parallel. Example:
```python
from akerbp.mlops.cdf.helpers import call_function_parallel
function_name = 'my_function-prediction-prod'
data = [dict(data='data_call_1'), dict(data='data_call_2')]
response1, response2 = call_function_parallel(function_name, data)
```
## Model Manager
Model Manager is the module dedicated to managing the model artifacts used by
prediction services (and generated by training services). This module uses CDF
Files as backend.
Model artifacts are versioned and stored together with user-defined metadata.
Uploading a new model increases the version count by 1 for that model and
environment. When deploying a prediction service, the latest model version is
chosen. It would be possible to extend the framework to allow deploying specific
versions or filtering by metadata.
Model artifacts are segregated by environment (e.g. only production artifacts
can be deployed to production). Model artifacts have to be uploaded manually to
test (or dev) environment before deployment. Code example:
```python
import akerbp.mlops.model_manager as mm
metadata = train(model_dir, secrets) # or define it directly
mm.setup()
folder_info = mm.upload_new_model_version(
  model_name,
  env,
  folder_path,
  metadata
)
```
If there are multiple models, you need to do this one at a time. Note that
`model_name` corresponds to one of the elements in `model_names` defined in
`mlops_settings.yaml`, `env` is the target environment (where the model should be
available), `folder_path` is the local model artifact folder and `metadata` is a
dictionary with artifact metadata, e.g. performance, git commit, etc.
Model artifacts need to be promoted to the production environment (i.e. after
they have been deployed successfully to the test environment) so that a prediction
service can be deployed in production.
```python
# After a model's version has been successfully deployed to test
import akerbp.mlops.model_manager as mm
mm.setup()
mm.promote_model('model', 'version')
```
### Versioning
Each model artifact upload/promotion increments a version number (environment dependent)
available in Model Manager. However, this doesn't modify the model
artifacts used in existing prediction services (i.e. nothing changes in CDF
Functions). To reflect newly uploaded/promoted model artifacts in existing services, one needs to deploy the services again. Note that we don't have to specify the artifact version explicitly if we want to deploy using the latest artifacts, as this is done by default.
Recommended process to update a model artifact and prediction service:
1. New model features implemented in a feature branch
2. New artifact generated and uploaded to test environment
3. Feature branch merged with master
4. Test deployment is triggered automatically: prediction service is deployed to
   test environment with the latest artifact version (in test)
5. Prediction service in test is verified
6. Artifact version is promoted manually from command line whenever suitable
7. Production deployment is triggered manually from Bitbucket: prediction
   service is deployed to production with the latest artifact version (in prod)
It's possible to get an overview of the model artifacts managed by Model
Manager. Some examples (see `get_model_version_overview` documentation for other
possible queries):
```python
import akerbp.mlops.model_manager as mm
mm.setup()
# all artifacts
folder_info = mm.get_model_version_overview()
# all artifacts for a given model
folder_info = mm.get_model_version_overview(model_name='xx')
```
If the overview shows model artifacts that are not needed, it is possible to
remove them. For example if artifact "my_model/dev/5" is not needed:
```python
model_to_remove = "my_model/dev/5"
mm.delete_model_version(model_to_remove)
```
Model Manager will by default show information on the artifact to delete and ask
for user confirmation before proceeding. It's possible (but not recommended) to
disable this check. There's no identity check, so it's possible to delete any
model artifact (including those from other data scientists). Be careful!
It's possible to download a model artifact (e.g. to verify its content). For
example:
```python
mm.download_model_version('model_name', 'test', 'artifact_folder', version=5)
```
If no version is specified, the latest one is downloaded by default.
By default, Model Manager assumes artifacts are stored in the `mlops` dataset.
If your project uses a different one, you need to specify it during setup (see
the `setup` function).
Further information:
- Model Manager requires `COGNITE_API_KEY_*` environment variables (see next
  section) or a suitable key passed to the `setup` function.
- In projects with a training service, you can rely on it to upload a first
  version of the model. The first prediction service deployment will fail, but
  you can deploy again after the training service has produced a model.
- When you deploy from the development environment (covered later in this
  document), the model artifacts in the settings file can point to existing
  local folders. These will then be used for the deployment. Version is then
  fixed to `model_name/dev/1`. Note that these artifacts are not uploaded to CDF
  Files.
- Prediction services are deployed with model artifacts (i.e. the artifact is
  copied to the project file used to create the CDF Function) so that they are
  available at prediction time. Downloading artifacts at run time would add
  waiting time, and files written during run time consume RAM.
## Model versioning
To allow for model versioning and rolling back to previous model deployments, the external id of the functions (in CDF) includes a version number that reflects the latest artifact version number at the time the function is deployed (see above).
Every time we upload/promote new model artifacts and deploy our services, the version number in the external id of the functions representing the services is incremented (just as the version number of the artifacts).
To distinguish the latest model from the remaining model versions, we redeploy the latest model version using a predictable external id that does not contain the version number. By doing so we relieve clients of the need to deal with version numbers, and they will call the latest model by default. For every new deployment we thus have two model deployments: one with the version number and one without it in the external id. However, the predictable external id is persisted across new model versions, so when deploying a new version the latest one, with the predictable external id, is simply overwritten.
We are thus concerned with two structures for the external id:
- ```<model_name>-<service>-<env>-<version>``` for rolling back to previous versions, and
- ```<model_name>-<service>-<env>``` for the latest deployed model
For the latest model with a predictable external id, we tag the description of the model to specify that the model is in fact the latest version, and add the version number to the function metadata.
We can now list out multiple models with the same model name and external id prefix, and choose to make predictions and do inference with a specific model version. An example is shown below.
```python
# List all prediction services (i.e. models) named "My Model" hosted in the test
# environment, and pick the model corresponding to the first element of the list
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key"))  # client_name is arbitrary
client = CogniteClient(config=cnf)
my_models = client.functions.list(name="My Model", external_id_prefix="mymodel-prediction-test")
my_model_specific_version = my_models[0]
```
## Calling a deployed model prediction service hosted in CDF
This section describes how you can call deployed models and obtain predictions for doing inference.
We have two options for calling a function in CDF: either using the MLOps framework directly, or using the Cognite SDK. Independent of how you call your model, you have to pass the data as a dictionary with a key "data" containing a dictionary with your data, where the keys of the inner dictionary specify the columns and the values are lists of samples for the corresponding columns.
First, load your data and transform it into a dictionary as assumed by the framework. Note that the data dictionary you pass to the function might vary based on your model interface. Make sure to align with what you specified in your `model.py` interface.
```python
import pandas as pd
data = pd.read_csv("path_to_data")
input_data = data.drop(columns=target_variables)  # target_variables: list of target column names
data_dict = {"data": input_data.to_dict(orient="list"), "to_file": True}
```
The "to_file" key of the input data dictionary specifies how the predictions can be extracted downstream. More details are provided below
Calling a deployed model using MLOps:
1. Set up a cognite client with sufficient access rights
2. Extract the response directly by specifying the external id of the model and passing your data as a dictionary
    - Note that the external id is of the form
      - ```"<model_name>-<service>-<env>-<version>"```, and
      - ```"<model_name>-<service>-<env>"```
Use the latter external id if you want to call the latest model. The former can be used if you want to call a previous version of your model.
```python
from akerbp.mlops.cdf.helpers import set_up_cdf_client, call_function
set_up_cdf_client(context="deploy") #access CDF data, files and functions with deploy context
response = call_function(function_name="<model_name>-prediction-<env>", data=data_dict)
```
Calling a deployed model using the Cognite SDK:
1. Set up a cognite client with sufficient access rights
2. Retrieve the model from CDF by specifying the external id of the model
3. Call the function
4. Extract the response from the function call
```python
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key")) # pass an arbitrary client_name
client = CogniteClient(config=cnf)
function = client.functions.retrieve(external_id="<model_name>-prediction-<env>")
function_call = function.call(data=data_dict)
response = function_call.get_response()
```
Depending on how you specified the input dictionary, the predictions are either available directly from the response or need to be extracted from Cognite Files.
If the input data dictionary contains a key "to_file" with value True, the predictions are uploaded to Cognite Files, and the 'prediction_file' field in the response will contain a reference to the file containing the predictions. If "to_file" is set to False, or if the input dictionary does not contain such a key-value pair, the predictions are directly available through the function call response.
If "to_file" is True, we can extract the predictions using the following code snippet:
```python
import json

file_id = response["prediction_file"]
bytes_data = client.files.download_bytes(external_id=file_id)
predictions_df = pd.DataFrame.from_dict(json.loads(bytes_data))
```
Otherwise, the predictions are directly accessible from the response as follows.
```python
predictions = response["predictions"]
```
## Extracting metadata from deployed model in CDF
Once a model is deployed, a user can extract potentially valuable metadata as follows.
```python
my_function = client.functions.retrieve(external_id="my_model-prediction-test")
metadata = my_function.metadata
```
The metadata corresponds to whatever you specified in the `mlops_settings.yaml` file. For this example we get the following metadata:
```
{'cat_filler': 'UNKNOWN',
 'imputed': 'True',
 'input_types': '[int, float, string]',
 'num_filler': '-999.15',
 'output_curves': '[AC]',
 'output_unit': '[s/ft]',
 'petrel_exposure': 'False',
 'required_input': '[ACS, RDEP, DEN]',
 'training_wells': '[3/1-4]',
 'units': '[s/ft, 1, kg/m3]'}
```
## Local Testing and Deployment
It's possible to test the functions locally, which can help you debug errors
quickly. This is recommended before a deployment.
Define the following environment variables (e.g. in `.bashrc`):
```bash
export ENV=dev
export COGNITE_API_KEY_PERSONAL=xxx
export COGNITE_API_KEY_FUNCTIONS=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_DATA=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_FILES=$COGNITE_API_KEY_PERSONAL
```
From your repo's root folder:
- `python -m pytest model_code` (replace `model_code` by your model code folder
  name)
- `deploy_prediction_service.sh`
- `deploy_training_service.sh` (if there's a training service)
The first one will run your model tests. The last two run model tests but also
the service tests implemented in the framework and simulate deployment.
If you really want to deploy from your development environment, you can run
this: `LOCAL_DEPLOYMENT=True deploy_prediction_service.sh`
Note that, in case of emergency, it's possible to deploy to test or production
from your local environment, e.g.: `LOCAL_DEPLOYMENT=True ENV=test deploy_prediction_service.sh`
If you only want to run tests, set `TESTING_ONLY=True` before calling the deployment script.
## Automated Deployments from Bitbucket
Deployments to the test environment are triggered by commits (you need to push
them). Deployments to the production environment are enabled manually from the
Bitbucket pipeline dashboard. Branches that match `deploy/*` behave as master.
Branches that match `feature/*` run tests only (i.e. do not deploy).
It is assumed that most projects won't include a training service. A branch that
matches `mlops/*` deploys both prediction and training services. If a project
includes both services, the pipeline file could instead be edited so that master
deploys both services.
It is possible to schedule the training service in CDF, and then it can make
sense to schedule the deployment pipeline of the model service (as often as new
models are trained)
NOTE: Previous versions of akerbp.mlops assumed that calling
`LOCAL_DEPLOYMENT=True bash deploy_prediction_service.sh` would run tests without deploying models.
The package is now refactored to only trigger tests when the environment variable
`TESTING_ONLY` is set to `True`, and to allow local deployment when setting `LOCAL_DEPLOYMENT=True`.
Make sure to update the pipeline definition for branches with prefix `feature/` to call
`TESTING_ONLY=True bash deploy_prediction_service.sh` instead.
## Bitbucket Setup
The following environments need to be defined in `repository settings >
deployments`:
- test deployments: `test-prediction` and `test-training`, each with `ENV=test`
- production deployments: `production-prediction` and `production-training`,
  each with `ENV=prod`
The following need to be defined in `repository settings > repository
variables`: `COGNITE_API_KEY_DATA`, `COGNITE_API_KEY_FUNCTIONS`,
`COGNITE_API_KEY_FILES` (these should be CDF keys with access to data, functions
and files).
The pipeline needs to be enabled.
# Developer/Admin Guide
## Package versioning
The versioning of the package follows [Semantic Versioning 2.0.0](https://semver.org/), using the `MAJOR.MINOR.PATCH` structure. We are thus updating the package version using the following convention
1. Increment MAJOR when making incompatible API changes
2. Increment MINOR when adding backwards compatible functionality
3. Increment PATCH when making backwards compatible bug-fixes
The version is updated based on the latest commit to the repo, and we are currently using the following rules.
- The MAJOR version is incremented if the commit message includes the word `major`
- The MINOR version is incremented if the commit message includes the word `minor`
- The PATCH number is incremented if neither `major` nor `minor` is found in the commit message
- If the commit message includes the phrase `pre-release`, the package version is extended with `-alpha`, thus taking the form `MAJOR.MINOR.PATCH-alpha`.
Note that the above keywords are **not** case sensitive. Moreover, `major` takes precedence over `minor`, so if both keywords are found in the commit message, the MAJOR version is incremented and the MINOR version is kept unchanged.
In the dev and test environments, we release the package using the pre-release tag, and the package version takes the form `MAJOR.MINOR.PATCH-alpha`.
The version number is tracked in the `version.txt` file. Because this file is updated during the pipeline build when releasing the package to PyPI (both in test and prod), we have to pull from git every time a new version is released.
## MLOps Files and Folders
These are the files and folders in the MLOps repo:
- `src` contains the MLOps framework package
- `mlops_settings.yaml` contains the user settings for the dummy model
- `model_code` is a model template included to show the model interface. It is
  not needed by the framework, but it is recommended to become familiar with it.
- `model_artifact` stores the artifacts for the model shown in  `model_code`.
  This is to help to test the model and learn the framework.
- `bitbucket-pipelines.yml` describes the deployment pipeline in Bitbucket
- `build.sh` is the script to build and upload the package
- `setup.py` is used to build the package
- `version.txt` keeps track of the package version number
- `LICENSE` is the package's license
## CDF Datasets
In order to control access to the artifacts:
1. Set up a CDF Dataset with `write_protected=True` and an `external_id`, which
   by default is expected to be `mlops`.
2. Create a group of owners (CDF Dashboard), i.e. those that should have write
   access.
## Build and Upload Package
Create an account on PyPI, then create a token and a `$HOME/.pypirc` file. Edit
the `setup.py` file and note the following:
- Dependencies need to be registered
- Bash scripts will be installed in a `bin` folder in the `PATH`.
The pipeline is set up to build the library from Bitbucket, but it's possible to
build and upload the library from the development environment as well:
```bash
bash build.sh
```
In general this is required before `LOCAL_DEPLOYMENT=True bash
deploy_xxx_service.sh`. The exception is if local changes affect only the
deployment part of the library, and the library has been installed in developer
mode with:
```bash
pip install -e .
```
In this mode, the installed package links to the source code, so that it can be
modified without the need to reinstall.
## Bitbucket Setup
In addition to the user setup, the following is needed to build the package:
- `test-pypi`: `ENV=test`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi)
- `prod-pypi`: `ENV=prod`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi, can be the same as above)
## Notes on the code
Service testing happens in an independent process (subprocess library) to avoid
setup problems:
 - When deploying multiple models, the service had to be reloaded before testing
   it; otherwise it would still be the first model's service. Model initialization in
   the prediction service is designed to load artifacts only once per process.
 - If the model and the MLOps framework rely on different versions of the same
   library, the version would be changed during runtime, but the
   upgraded/downgraded version would not be available to the current process.

%package help
Summary:	Development documents and examples for akerbp.mlops
Provides:	python3-akerbp.mlops-doc
%description help
```yaml
model_name: model2
human_friendly_model: 'My Second Model'
model_file: model_code/model2.py
(...)
```
## Files and Folders Structure
All the model code and files should be under a single folder, e.g. `model_code`.
**Required** files in this folder:
- `model.py`: implements the standard model interface
- `test_model.py`: tests to verify that the model code is correct and to verify
  correct deployment
- `requirements.model`: libraries needed (with specific **version numbers**),
  can't be called `requirements.txt`. Add the MLOps framework like this:
  ```bash
  # requirements.model
  (...) # your other reqs
  akerbp.mlops==MLOPS_VERSION
  ```
  During deployment, `MLOPS_VERSION` will be automatically replaced by the
  specific version **that you have installed locally**. Make sure you have the latest release on your local machine prior to model deployment.
For the prediction service, the model interface is required to have the following functions and class (see the minimal sketch after this list):
  - `initialization()`, with required arguments
    - path to the artifact folder
    - secrets
      - These arguments can safely be set to `None`, and the framework will handle everything under the hood.
      - Only set the path to the artifact folder to `None` if the model does not use any artifacts.
  - `predict()`, with required arguments
    - data
    - init_object (the output from the `initialization()` function)
    - secrets
      - You can safely set the secrets argument to `None`, and the framework will handle the secrets under the hood.
  - a `ModelException` class that inherits from the `Exception` base class
For the training service, the model interface is required to have the following function and class:
  - `train()`, with required arguments
    - folder_path
      - path to store the model artifacts to be consumed by the prediction service
  - a `ModelException` class that inherits from the `Exception` base class
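A minimal sketch of a `model.py` that satisfies these requirements is shown below. The function and class names follow the list above; everything else (the pickled artifact name `model.pkl`, how `data` is used, the training body) is purely illustrative and should be adapted to your own model.
```python
# model_code/model.py -- illustrative sketch only; the required names and
# arguments are the ones listed above, the bodies are placeholders.
import os
import pickle


class ModelException(Exception):
    """Raised by the model code when prediction or training fails."""


def initialization(artifact_folder, secrets):
    # Artifacts are loaded only once per process by the prediction service.
    # Both arguments may be None; only pass None for artifact_folder if the
    # model uses no artifacts. "model.pkl" is an assumed file name.
    with open(os.path.join(artifact_folder, "model.pkl"), "rb") as f:
        return pickle.load(f)


def predict(data, init_object, secrets):
    # "data" is the payload passed to the service; adapt the call to your model.
    try:
        return init_object.predict(data)
    except Exception as error:
        raise ModelException(str(error)) from error


def train(folder_path):
    # Fit the model and write artifacts to folder_path so that the prediction
    # service can consume them.
    model = ...  # train your model here
    with open(os.path.join(folder_path, "model.pkl"), "wb") as f:
        pickle.dump(model, f)
```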
The following structure is recommended for projects with multiple models:
- `model_code/model1/`
- `model_code/model2/`
- `model_code/common_code/`
This is because when deploying a model, e.g. `model1`, the top folder in the
path (`model_code` in the example above) is copied and deployed, i.e. the
`common_code` folder (assumed to be needed by `model1`) is included. Note that
the `model2` folder would also be deployed (this is assumed to be unnecessary
but harmless).
## Import Guidelines
The repo's root folder is the base folder when importing. For example, assume
you have these files in the folder with model code:
 - `model_code/model.py`
 - `model_code/helper.py`
 - `model_code/data.csv`
If `model.py` needs to import `helper.py`, use: `import model_code.helper`. If
`model.py` needs to read `data.csv`, the right path is
`os.path.join('model_code', 'data.csv')`.
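As an illustration, a minimal sketch of `model.py` following these guidelines (file names as in the example above):
```python
# model_code/model.py -- imports are resolved from the repo's root folder
import os

import pandas as pd

import model_code.helper  # not "import helper"

# File paths are also relative to the repo root
data = pd.read_csv(os.path.join("model_code", "data.csv"))
```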
It's of course possible to import from the MLOps package, e.g. its logger:
``` python
from akerbp.mlops.core import logger
logging = logger.get_logger("logger_name")
logging.debug("This is a debug log")
```
## Services
We consider two types of services: prediction and training.
Deployed services can be called with
```python
from akerbp.mlops.xx.helpers import call_function
output = call_function(external_id, data)
```
Where `xx` is either `'cdf'` or `'gc'`, and `external_id` follows the
structure `model-service-env`:
 - `model`: model name given by the user (settings file)
 - `service`: either `training` or `prediction`
 - `env`: either `dev`, `test` or `prod` (depending on the deployment
   environment)
The output has a `status` field (`ok` or `error`). If the status is `ok`, the
output also has a `prediction` and `prediction_file` field, or a `training`
field (depending on the type of service). The former is determined by the
`predict` method of the model, while the latter combines artifact metadata and
model metadata produced by the `train` function. Prediction services also have
a `model_id` field to keep track of which model was used to predict.
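For illustration, a minimal sketch of handling such an output (field names as described above; the error handling is hypothetical):
```python
# Sketch: inspect the output of a prediction service call
if output["status"] == "ok":
    predictions = output["prediction"]               # result of the model's predict method
    prediction_file = output.get("prediction_file")  # reference to CDF Files, when written to file
    model_id = output["model_id"]                    # artifact version used by the service
else:
    raise RuntimeError(f"Service call failed: {output}")
```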
See below for more details on how to call prediction services hosted in CDF.
## Deployment Platform
Model services (described below) are deployed to CDF (Cognite Data Fusion).
CDF Functions include metadata when they are called. This information can be
used to redeploy a function (specifically, the `file_id` field). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
human_readable_name = "My model"
external_id = "my_model-prediction-test"
cdf.set_up_cdf_client('deploy')
cdf.redeploy_function(
  human_readable_name,
  external_id,
  file_id,  # file_id is taken from the function call metadata mentioned above
  'Description',
  'your@email.com'
)
```
Note that the external id of a function needs to be unique, as it is used to distinguish functions across services and hosting environments.
It's possible to query available functions (can be filtered by environment
and/or tags). Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
all_functions = cdf.list_functions()
test_functions = cdf.list_functions(env="test")
tag_functions = cdf.list_functions(tags=["well_interpretation"])
```
Functions can be deleted. Example:
```python
import akerbp.mlops.cdf.helpers as cdf
cdf.set_up_cdf_client('deploy')
cdf.delete_service("my_model-prediction-test")
```
Functions can be called in parallel. Example:
```python
from akerbp.mlops.cdf.helpers import call_function_parallel
function_name = 'my_function-prediction-prod'
data = [dict(data='data_call_1'), dict(data='data_call_2')]
response1, response2 = call_function_parallel(function_name, data)
```
## Model Manager
Model Manager is the module dedicated to managing the model artifacts used by
prediction services (and generated by training services). This module uses CDF
Files as backend.
Model artifacts are versioned and stored together with user-defined metadata.
Uploading a new model increases the version count by 1 for that model and
environment. When deploying a prediction service, the latest model version is
chosen. It would be possible to extend the framework to allow deploying specific
versions or filtering by metadata.
Model artifacts are segregated by environment (e.g. only production artifacts
can be deployed to production). Model artifacts have to be uploaded manually to
test (or dev) environment before deployment. Code example:
```python
import akerbp.mlops.model_manager as mm
metadata = train(model_dir, secrets) # or define it directly
mm.setup()
folder_info = mm.upload_new_model_version(
  model_name,
  env,
  folder_path,
  metadata
)
```
If there are multiple models, you need to do this one at a time (see the sketch
below). Note that `model_name` corresponds to one of the elements in
`model_names` defined in `mlops_settings.py`, `env` is the target environment
(where the model should be available), `folder_path` is the local model artifact
folder and `metadata` is a dictionary with artifact metadata, e.g. performance,
git commit, etc.
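A sketch of uploading artifacts for several models in turn; the `model_names` list, the folder layout and the metadata below are hypothetical and must be adapted to your project:
```python
import akerbp.mlops.model_manager as mm

mm.setup()
for model_name in model_names:  # model_names as defined in your settings file
    mm.upload_new_model_version(
        model_name,
        "test",                     # env: target environment
        f"artifacts/{model_name}",  # hypothetical local artifact folder
        {"git_commit": "abc123"},   # example metadata
    )
```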
Model artifacts need to be promoted to the production environment (i.e. after
they have been deployed successfully to the test environment) so that a
prediction service can be deployed in production.
```python
# After a model's version has been successfully deployed to test
import akerbp.mlops.model_manager as mm
mm.setup()
mm.promote_model('model', 'version')
```
### Versioning
Each model artifact upload/promotion increments a version number (environment
dependent) available in Model Manager. However, this doesn't modify the model
artifacts used in existing prediction services (i.e. nothing changes in CDF
Functions). To reflect newly uploaded/promoted model artifacts in the existing
services, one needs to deploy the services again. Note that we don't have to
specify the artifact version explicitly if we want to deploy using the latest
artifacts, as this is done by default.
Recommended process to update a model artifact and prediction service:
1. New model features implemented in a feature branch
2. New artifact generated and uploaded to test environment
3. Feature branch merged with master
4. Test deployment is triggered automatically: prediction service is deployed to
   test environment with the latest artifact version (in test)
5. Prediction service in test is verified
6. Artifact version is promoted manually from command line whenever suitable
7. Production deployment is triggered manually from Bitbucket: prediction
   service is deployed to production with the latest artifact version (in prod)
It's possible to get an overview of the model artifacts managed by Model
Manager. Some examples (see `get_model_version_overview` documentation for other
possible queries):
```python
import akerbp.mlops.model_manager as mm
mm.setup()
# all artifacts
folder_info = mm.get_model_version_overview()
# all artifacts for a given model
folder_info = mm.get_model_version_overview(model_name='xx')
```
If the overview shows model artifacts that are not needed, it is possible to
remove them. For example if artifact "my_model/dev/5" is not needed:
```python
model_to_remove = "my_model/dev/5"
mm.delete_model_version(model_to_remove)
```
Model Manager will by default show information on the artifact to delete and ask
for user confirmation before proceeding. It's possible (but not recommended) to
disable this check. There's no identity check, so it's possible to delete any
model artifact (including those uploaded by other data scientists). Be careful!
It's possible to download a model artifact (e.g. to verify its content). For
example:
```python
mm.download_model_version('model_name', 'test', 'artifact_folder', version=5)
```
If no version is specified, the latest one is downloaded by default.
By default, Model Manager assumes artifacts are stored in the `mlops` dataset.
If your project uses a different one, you need to specify it during setup (see
the `setup` function).
Further information:
- Model Manager requires `COGNITE_API_KEY_*` environmental variables (see next
  section) or a suitable key passed to the `setup` function.
- In projects with a training service, you can rely on it to upload a first
  version of the model. The first prediction service deployment will fail, but
  you can deploy again after the training service has produced a model.
- When you deploy from the development environment (covered later in this
  document), the model artifacts in the settings file can point to existing
  local folders. These will then be used for the deployment. Version is then
  fixed to `model_name/dev/1`. Note that these artifacts are not uploaded to CDF
  Files.
- Prediction services are deployed with model artifacts (i.e. the artifact is
  copied to the project file used to create the CDF Function) so that they are
  available at prediction time. Downloading artifacts at run time would add
  waiting time, and files written at run time consume RAM.
## Model versioning
To allow for model versioning and rolling back to previous model deployments, the external id of the functions (in CDF) includes a version number that is reflected by the latest artifact version number when deploying the function (see above).
Every time we upload/promote new model artifacts and deploy our services, the version number in the external id of the functions representing the services is incremented (just like the version number of the artifacts).
To distinguish the latest model from the remaining model versions, we redeploy the latest model version using a predictable external id that does not contain the version number. By doing so we relieve clients of the need to deal with version numbers, and they will call the latest model by default. For every new deployment we thus have two model deployments: one with the version number and one without the version number in the external id. However, the predictable external id is persisted across new model versions, so when deploying a new version the latest one, with the predictable external id, is simply overwritten.
We are thus concerned with two structures for the external id
- ```<model_name>-<service>-<env>-<version>``` for rolling back to previous versions, and
- ```<model_name>-<service>-<env>``` for the latest deployed model
For the latest model with a predictable external id, we tag the description of the model to specify that the model is in fact the latest version, and add the version number to the function metadata.
We can now list out multiple models with the same model name and external id prefix, and choose to make predictions and do inference with a specific model version. An example is shown below.
```python
# List all prediction services (i.e. models) named "My Model" hosted in the test
# environment, and pick a specific version from the returned list
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key"))  # client_name is arbitrary
client = CogniteClient(config=cnf)
my_models = client.functions.list(name="My Model", external_id_prefix="mymodel-prediction-test")
my_model_specific_version = my_models[0]
```
## Calling a deployed model prediction service hosted in CDF
This section describes how you can call deployed models and obtain predictions for doing inference.
We have two options for calling a function in CDF: using the MLOps framework directly, or using the Cognite SDK. Independent of how you call your model, you have to pass the data as a dictionary with a key "data" containing a dictionary with your data, where the keys of the inner dictionary specify the columns and the values are lists of samples for the corresponding columns.
First, load your data and transform it into the dictionary format assumed by the framework. Note that the data dictionary you pass to the function might vary based on your model interface. Make sure to align with what you specified in your `model.py` interface.
```python
import pandas as pd
data = pd.read_csv("path_to_data")
input_data = data.drop(columns=target_variables)  # target_variables: list of target column names to exclude
data_dict = {"data": input_data.to_dict(orient="list"), "to_file": True}
```
The "to_file" key of the input data dictionary specifies how the predictions can be extracted downstream. More details are provided below
Calling a deployed model using MLOps:
1. Set up a Cognite client with sufficient access rights
2. Extract the response directly by specifying the external id of the model and passing your data as a dictionary
    - Note that the external id has one of the following forms
      - ```"<model_name>-<service>-<env>-<version>"```, and
      - ```"<model_name>-<service>-<env>"```
Use the latter external id if you want to call the latest model. The former can be used if you want to call a previous version of your model.
```python
from akerbp.mlops.cdf.helpers import set_up_cdf_client, call_function
set_up_cdf_client(context="deploy") #access CDF data, files and functions with deploy context
response = call_function(function_name="<model_name>-prediction-<env>", data=data_dict)
```
Calling a deployed model using the Cognite SDK:
1. Set up a Cognite client with sufficient access rights
2. Retrieve the model from CDF by specifying the external id of the model
3. Call the function
4. Extract the response from the function call
```python
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
cnf = ClientConfig(client_name="model inference", project="akbp-subsurface", credentials=APIKey("my-api-key")) # pass an arbitrary client_name
client = CogniteClient(config=cnf)
function = client.functions.retrieve(external_id="<model_name>-prediction-<env>")
function_call = function.call(data=data_dict)
response = function_call.get_response()
```
Depending on how you specified the input dictionary, the predictions are either available directly from the response or need to be extracted from Cognite Files.
If the input data dictionary contains a key "to_file" with value True, the predictions are uploaded to Cognite Files, and the 'prediction_file' field in the response will contain a reference to the file containing the predictions. If "to_file" is set to False, or if the input dictionary does not contain such a key-value pair, the predictions are directly available through the function call response.
If "to_file" = True, we can extract the predictions using the following code snippet:
```python
import json

file_id = response["prediction_file"]
bytes_data = client.files.download_bytes(external_id=file_id)
predictions_df = pd.DataFrame.from_dict(json.loads(bytes_data))
```
Otherwise, the predictions are directly accessible from the response as follows.
```python
predictions = response["prediction"]
```
## Extracting metadata from deployed model in CDF
Once a model is deployed, a user can extract potentially valuable metadata as follows.
```python
my_function = client.functions.retrieve(external_id="my_model-prediction-test")
metadata = my_function.metadata
```
Here the metadata corresponds to whatever you specified in the `mlops_settings.yaml` file. For this example we get the following metadata:
```
{'cat_filler': 'UNKNOWN',
 'imputed': 'True',
 'input_types': '[int, float, string]',
 'num_filler': '-999.15',
 'output_curves': '[AC]',
 'output_unit': '[s/ft]',
 'petrel_exposure': 'False',
 'required_input': '[ACS, RDEP, DEN]',
 'training_wells': '[3/1-4]',
 'units': '[s/ft, 1, kg/m3]'}
```
## Local Testing and Deployment
It's possible to test the functions locally, which can help you debug errors
quickly. This is recommended before a deployment.
Define the following environmental variables (e.g. in `.bashrc`):
```bash
export ENV=dev
export COGNITE_API_KEY_PERSONAL=xxx
export COGNITE_API_KEY_FUNCTIONS=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_DATA=$COGNITE_API_KEY_PERSONAL
export COGNITE_API_KEY_FILES=$COGNITE_API_KEY_PERSONAL
```
From your repo's root folder:
- `python -m pytest model_code` (replace `model_code` by your model code folder
  name)
- `deploy_prediction_service.sh`
- `deploy_training_service.sh` (if there's a training service)
The first command runs your model tests. The last two also run the model tests,
plus the service tests implemented in the framework, and simulate deployment.
If you really want to deploy from your development environment, you can run
this: `LOCAL_DEPLOYMENT=True deploy_prediction_service.sh`
Note that, in case of emergency, it's possible to deploy to test or production
from your local environment, e.g.: `LOCAL_DEPLOYMENT=True ENV=test deploy_prediction_service.sh`.
If you want to run tests only, you need to set `TESTING_ONLY=True` before calling the deployment script.
## Automated Deployments from Bitbucket
Deployments to the test environment are triggered by commits (you need to push
them). Deployments to the production environment are enabled manually from the
Bitbucket pipeline dashboard. Branches that match `deploy/*` behave as master.
Branches that match `feature/*` run tests only (i.e. do not deploy).
It is assumed that most projects won't include a training service. A branch that
matches `mlops/*` deploys both prediction and training services. If a project
includes both services, the pipeline file could instead be edited so that master
deploys both services.
It is possible to schedule the training service in CDF, and then it can make
sense to schedule the deployment pipeline of the model service (as often as new
models are trained).
NOTE: Previous versions of akerbp.mlops assumed that calling
`LOCAL_DEPLOYMENT=True bash deploy_prediction_service.sh` would only run tests and not deploy models.
The package has now been refactored to trigger tests only when the environment variable
`TESTING_ONLY` is set to `True`, and to allow local deployment when setting `LOCAL_DEPLOYMENT=True`.
Make sure to update the pipeline definition for branches with prefix `feature/` to call
`TESTING_ONLY=True bash deploy_prediction_service.sh` instead.
## Bitbucket Setup
The following environments need to be defined in `repository settings >
deployments`:
- test deployments: `test-prediction` and `test-training`, each with `ENV=test`
- production deployments: `production-prediction` and `production-training`,
  each with `ENV=prod`
The following need to be defined in `repository settings > repository
variables`: `COGNITE_API_KEY_DATA`, `COGNITE_API_KEY_FUNCTIONS`,
`COGNITE_API_KEY_FILES` (these should be CDF keys with access to data, functions
and files).
The pipeline needs to be enabled.
# Developer/Admin Guide
## Package versioning
The versioning of the package follows [Semantic Versioning 2.0.0](https://semver.org/), using the `MAJOR.MINOR.PATCH` structure. We thus update the package version using the following convention:
1. Increment MAJOR when making incompatible API changes
2. Increment MINOR when adding backwards compatible functionality
3. Increment PATCH when making backwards compatible bug-fixes
The version is updated based on the latest commit to the repo, and we currently use the following rules:
- The MAJOR version is incremented if the commit message includes the word `major`
- The MINOR version is incremented if the commit message includes the word `minor`
- The PATCH number is incremented if neither `major` nor `minor` is found in the commit message
- If the commit message includes the phrase `pre-release`, the package version is extended with `-alpha`, thus taking the form `MAJOR.MINOR.PATCH-alpha`.
Note that the above keywords are **not** case sensitive. Moreover, `major` takes precedence over `minor`, so if both keywords are found in the commit message, the MAJOR version is incremented and the MINOR version is kept unchanged.
In the dev and test environments, we release the package using the pre-release tag, and the package version takes the form `MAJOR.MINOR.PATCH-alpha`.
The version number is tracked in the `version.txt` file. Because this file is updated during the pipeline build when releasing the package to PyPI (both in test and prod), we have to pull from git every time a new version is released.
## MLOps Files and Folders
These are the files and folders in the MLOps repo:
- `src` contains the MLOps framework package
- `mlops_settings.yaml` contains the user settings for the dummy model
- `model_code` is a model template included to show the model interface. It is
  not needed by the framework, but it is recommended to become familiar with it.
- `model_artifact` stores the artifacts for the model shown in `model_code`.
  This is provided to help you test the model and learn the framework.
- `bitbucket-pipelines.yml` describes the deployment pipeline in Bitbucket
- `build.sh` is the script to build and upload the package
- `setup.py` is used to build the package
- `version.txt` keeps track of the package version number
- `LICENSE` is the package's license
## CDF Datasets
In order to control access to the artifacts:
1. Set up a CDF Dataset with `write_protected=True` and an `external_id`, which
   by default is expected to be `mlops` (see the sketch after this list).
2. Create a group of owners (CDF Dashboard), i.e. those that should have write
   access
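As a sketch, step 1 could be done with the Cognite SDK; the project name, client name and API key below are placeholders and must be replaced with your own values:
```python
from cognite.client import CogniteClient, ClientConfig
from cognite.client.credentials import APIKey
from cognite.client.data_classes import DataSet

cnf = ClientConfig(client_name="mlops setup", project="your-project", credentials=APIKey("my-api-key"))
client = CogniteClient(config=cnf)
# Create the write-protected dataset that will hold the model artifacts
client.data_sets.create(DataSet(external_id="mlops", name="mlops", write_protected=True))
```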
## Build and Upload Package
Create an account in PyPI, then create a token and a `$HOME/.pypirc` file. Edit
the `setup.py` file and note the following:
- Dependencies need to be registered
- Bash scripts will be installed in a `bin` folder in the `PATH`.
The pipeline is set up to build the library from Bitbucket, but it's possible to
build and upload the library from the development environment as well:
```bash
bash build.sh
```
In general this is required before `LOCAL_DEPLOYMENT=True bash
deploy_xxx_service.sh`. The exception is if local changes affect only the
deployment part of the library, and the library has been installed in developer
mode with:
```bash
pip install -e .
```
In this mode, the installed package links to the source code, so that it can be
modified without the need to reinstall.
## Bitbucket Setup
In addition to the user setup, the following is needed to build the package:
- `test-pypi`: `ENV=test`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi)
- `prod-pypi`: `ENV=prod`, `TWINE_USERNAME=__token__` and `TWINE_PASSWORD`
  (token generated from pypi, can be the same as above)
## Notes on the code
Service testing happens in an independent process (subprocess library) to avoid
setup problems:
 - When deploying multiple models, the service had to be reloaded before testing
   it; otherwise it would be the first model's service. Model initialization in
   the prediction service is designed to load artifacts only once per process.
 - If the model and the MLOps framework rely on different versions of the same
   library, the version would be changed during runtime, but the
   upgraded/downgraded version would not be available to the current process.

%prep
%autosetup -n akerbp.mlops-2.5.8

%build
%py3_build

%install
%py3_install
install -d -m755 %{buildroot}/%{_pkgdocdir}
if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
pushd %{buildroot}
if [ -d usr/lib ]; then
	find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/lib64 ]; then
	find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/bin ]; then
	find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/sbin ]; then
	find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
fi
touch doclist.lst
if [ -d usr/share/man ]; then
	find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
fi
popd
mv %{buildroot}/filelist.lst .
mv %{buildroot}/doclist.lst .

%files -n python3-akerbp.mlops -f filelist.lst
%dir %{python3_sitelib}/*

%files help -f doclist.lst
%{_docdir}/*

%changelog
* Sun Apr 23 2023 Python_Bot <Python_Bot@openeuler.org> - 2.5.8-1
- Package Spec generated