Diffstat (limited to 'python-cdk-aurora-globaldatabase.spec')
 -rw-r--r--  python-cdk-aurora-globaldatabase.spec | 650
 1 file changed, 650 insertions(+), 0 deletions(-)
diff --git a/python-cdk-aurora-globaldatabase.spec b/python-cdk-aurora-globaldatabase.spec new file mode 100644 index 0000000..0ee701c --- /dev/null +++ b/python-cdk-aurora-globaldatabase.spec @@ -0,0 +1,650 @@ +%global _empty_manifest_terminate_build 0 +Name: python-cdk-aurora-globaldatabase +Version: 2.3.114 +Release: 1 +Summary: cdk-aurora-globaldatabase is an AWS CDK construct library that provides Cross Region Create Global Aurora RDS Databases. +License: Apache-2.0 +URL: https://github.com/neilkuan/cdk-aurora-globaldatabase.git +Source0: https://mirrors.nju.edu.cn/pypi/web/packages/32/ef/cd707713e7b8d2bb9ea7acdf5021ec5376cc7f6bb1cc814af3d602725314/cdk-aurora-globaldatabase-2.3.114.tar.gz +BuildArch: noarch + +Requires: python3-aws-cdk-lib +Requires: python3-constructs +Requires: python3-jsii +Requires: python3-publication +Requires: python3-typeguard + +%description +[](https://badge.fury.io/js/cdk-aurora-globaldatabase) +[](https://badge.fury.io/py/cdk-aurora-globaldatabase) + + + + + + +# cdk-aurora-globaldatabase + +`cdk-aurora-globaldatabase` is an AWS CDK construct library that allows you to create [Amazon Aurora Global Databases](https://aws.amazon.com/rds/aurora/global-database/) with AWS CDK in Typescript or Python. + +# Why + +**Amazon Aurora Global Databases** is designed for multi-regional Amazon Aurora Database clusters that span across different AWS regions. Due to the lack of native cloudformation support, it has been very challenging to build with cloudformation or AWS CDK with the upstream `aws-rds` construct. + +`cdk-aurora-globaldatabase` aims to offload the heavy-lifting and helps you provision and deploy cross-regional **Amazon Aurora Global Databases** simply with just a few CDK statements. + +## Install + +```bash +Use the npm dist tag to opt in CDKv1 or CDKv2: + +// for CDKv2 +npm install cdk-aurora-globaldatabase +or +npm install cdk-aurora-globaldatabase@latest + +// for CDKv1 +npm install cdk-aurora-globaldatabase@cdkv1 +``` + +# ⛔️ Please do not use cdk v1, because lot of db engine version already not been update in @aws-cdk/aws-rds upstream. ⛔️ + +## Now Try It !!! + +# Sample for Mysql + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +// new app . +const mockApp = new App(); + +// setting two region env config . +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +// create stack main . +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +// create stack slave infra or you can give your subnet group. 
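+// GlobalAuroraRDSSlaveInfra only prepares the secondary-region networking: it creates a
+// DB subnet group in the VPC you pass in. The regional cluster itself is attached later
+// from the master stack via globaldbM.addRegionalCluster(), which references that subnet
+// group by name (globaldbS.dbSubnetGroup.dbSubnetGroupName).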
+const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC }); + +// so we need to wait stack slave created first . +stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + + + +# Sample for Postgres + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as _rds from 'aws-cdk-lib/aws-rds'; + +const mockApp = new App(); +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); + +// Note if you use postgres , need to give the same value in engineVersion and dbClusterpPG's engine . +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', + engineVersion: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11}), + dbClusterpPG: new _rds.ParameterGroup(stackM, 'dbClusterparametergroup', { + engine: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11, + }), + parameters: { + 'rds.force_ssl': '1', + 'rds.log_retention_period': '10080', + 'auto_explain.log_min_duration': '5000', + 'auto_explain.log_verbose': '1', + 'timezone': 'UTC+8', + 'shared_preload_libraries': 'auto_explain,pg_stat_statements,pg_hint_plan,pgaudit', + 'log_connections': '1', + 'log_statement': 'ddl', + 'log_disconnections': '1', + 'log_lock_waits': '1', + 'log_min_duration_statement': '5000', + 'log_rotation_age': '1440', + 'log_rotation_size': '102400', + 'random_page_cost': '1', + 'track_activity_query_size': '16384', + 'idle_in_transaction_session_timeout': '7200000', + }, + }), +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{ + vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC, +}); + +stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + +### To deploy + +```bash +cdk deploy +``` + 
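+Both sample apps resolve their target account from `CDK_DEFAULT_ACCOUNT` (populated by the CDK CLI from your current credentials) and read `MYIP` to open the cluster's security group to your address. A minimal deploy sketch, assuming the stack names from the samples above and using checkip.amazonaws.com as just one way to look up your public IP:
+
+```bash
+# open the DB security group to this machine's public IP (read via process.env.MYIP)
+export MYIP=$(curl -s https://checkip.amazonaws.com)
+
+# deploy both stacks; stackM.addDependency(stackS) makes the CDK CLI create the
+# secondary-region infra (testing-stackS) before the master stack (testing-stackM)
+cdk deploy testing-stackS testing-stackM
+```
+
+Both regions may also need to be bootstrapped (`cdk bootstrap`) before the first deploy.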
+### To destroy + +```bash +cdk destroy +``` + +## :clap: Supporters + +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/stargazers) +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/network/members) + + +%package -n python3-cdk-aurora-globaldatabase +Summary: cdk-aurora-globaldatabase is an AWS CDK construct library that provides Cross Region Create Global Aurora RDS Databases. +Provides: python-cdk-aurora-globaldatabase +BuildRequires: python3-devel +BuildRequires: python3-setuptools +BuildRequires: python3-pip +%description -n python3-cdk-aurora-globaldatabase +[](https://badge.fury.io/js/cdk-aurora-globaldatabase) +[](https://badge.fury.io/py/cdk-aurora-globaldatabase) + + + + + + +# cdk-aurora-globaldatabase + +`cdk-aurora-globaldatabase` is an AWS CDK construct library that allows you to create [Amazon Aurora Global Databases](https://aws.amazon.com/rds/aurora/global-database/) with AWS CDK in Typescript or Python. + +# Why + +**Amazon Aurora Global Databases** is designed for multi-regional Amazon Aurora Database clusters that span across different AWS regions. Due to the lack of native cloudformation support, it has been very challenging to build with cloudformation or AWS CDK with the upstream `aws-rds` construct. + +`cdk-aurora-globaldatabase` aims to offload the heavy-lifting and helps you provision and deploy cross-regional **Amazon Aurora Global Databases** simply with just a few CDK statements. + +## Install + +```bash +Use the npm dist tag to opt in CDKv1 or CDKv2: + +// for CDKv2 +npm install cdk-aurora-globaldatabase +or +npm install cdk-aurora-globaldatabase@latest + +// for CDKv1 +npm install cdk-aurora-globaldatabase@cdkv1 +``` + +# ⛔️ Please do not use cdk v1, because lot of db engine version already not been update in @aws-cdk/aws-rds upstream. ⛔️ + +## Now Try It !!! + +# Sample for Mysql + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +// new app . +const mockApp = new App(); + +// setting two region env config . +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +// create stack main . +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +// create stack slave infra or you can give your subnet group. +const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC }); + +// so we need to wait stack slave created first . 
+stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + + + +# Sample for Postgres + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as _rds from 'aws-cdk-lib/aws-rds'; + +const mockApp = new App(); +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); + +// Note if you use postgres , need to give the same value in engineVersion and dbClusterpPG's engine . +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', + engineVersion: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11}), + dbClusterpPG: new _rds.ParameterGroup(stackM, 'dbClusterparametergroup', { + engine: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11, + }), + parameters: { + 'rds.force_ssl': '1', + 'rds.log_retention_period': '10080', + 'auto_explain.log_min_duration': '5000', + 'auto_explain.log_verbose': '1', + 'timezone': 'UTC+8', + 'shared_preload_libraries': 'auto_explain,pg_stat_statements,pg_hint_plan,pgaudit', + 'log_connections': '1', + 'log_statement': 'ddl', + 'log_disconnections': '1', + 'log_lock_waits': '1', + 'log_min_duration_statement': '5000', + 'log_rotation_age': '1440', + 'log_rotation_size': '102400', + 'random_page_cost': '1', + 'track_activity_query_size': '16384', + 'idle_in_transaction_session_timeout': '7200000', + }, + }), +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{ + vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC, +}); + +stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + +### To deploy + +```bash +cdk deploy +``` + +### To destroy + +```bash +cdk destroy +``` + +## :clap: Supporters + +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/stargazers) +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/network/members) + + +%package help +Summary: Development documents and examples for cdk-aurora-globaldatabase +Provides: python3-cdk-aurora-globaldatabase-doc +%description help +[](https://badge.fury.io/js/cdk-aurora-globaldatabase) 
+[](https://badge.fury.io/py/cdk-aurora-globaldatabase) + + + + + + +# cdk-aurora-globaldatabase + +`cdk-aurora-globaldatabase` is an AWS CDK construct library that allows you to create [Amazon Aurora Global Databases](https://aws.amazon.com/rds/aurora/global-database/) with AWS CDK in Typescript or Python. + +# Why + +**Amazon Aurora Global Databases** is designed for multi-regional Amazon Aurora Database clusters that span across different AWS regions. Due to the lack of native cloudformation support, it has been very challenging to build with cloudformation or AWS CDK with the upstream `aws-rds` construct. + +`cdk-aurora-globaldatabase` aims to offload the heavy-lifting and helps you provision and deploy cross-regional **Amazon Aurora Global Databases** simply with just a few CDK statements. + +## Install + +```bash +Use the npm dist tag to opt in CDKv1 or CDKv2: + +// for CDKv2 +npm install cdk-aurora-globaldatabase +or +npm install cdk-aurora-globaldatabase@latest + +// for CDKv1 +npm install cdk-aurora-globaldatabase@cdkv1 +``` + +# ⛔️ Please do not use cdk v1, because lot of db engine version already not been update in @aws-cdk/aws-rds upstream. ⛔️ + +## Now Try It !!! + +# Sample for Mysql + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +// new app . +const mockApp = new App(); + +// setting two region env config . +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +// create stack main . +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +// create stack slave infra or you can give your subnet group. +const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC }); + +// so we need to wait stack slave created first . 
+stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + + + +# Sample for Postgres + +```python +import { GlobalAuroraRDSMaster, InstanceTypeEnum, GlobalAuroraRDSSlaveInfra } from 'cdk-aurora-globaldatabase'; +import { App, Stack, CfnOutput } from 'aws-cdk-lib'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; +import * as _rds from 'aws-cdk-lib/aws-rds'; + +const mockApp = new App(); +const envSingapro = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-southeast-1' }; +const envTokyo = { account: process.env.CDK_DEFAULT_ACCOUNT, region: 'ap-northeast-1' }; + +const stackM = new Stack(mockApp, 'testing-stackM',{env: envTokyo}); +const vpcPublic = new ec2.Vpc(stackM,'defaultVpc',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'masterVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); + +// Note if you use postgres , need to give the same value in engineVersion and dbClusterpPG's engine . +const globaldbM = new GlobalAuroraRDSMaster(stackM, 'globalAuroraRDSMaster',{ + instanceType: InstanceTypeEnum.R5_LARGE, + vpc: vpcPublic, + rdsPassword: '1qaz2wsx', + engineVersion: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11}), + dbClusterpPG: new _rds.ParameterGroup(stackM, 'dbClusterparametergroup', { + engine: _rds.DatabaseClusterEngine.auroraPostgres({ + version: _rds.AuroraPostgresEngineVersion.VER_12_11, + }), + parameters: { + 'rds.force_ssl': '1', + 'rds.log_retention_period': '10080', + 'auto_explain.log_min_duration': '5000', + 'auto_explain.log_verbose': '1', + 'timezone': 'UTC+8', + 'shared_preload_libraries': 'auto_explain,pg_stat_statements,pg_hint_plan,pgaudit', + 'log_connections': '1', + 'log_statement': 'ddl', + 'log_disconnections': '1', + 'log_lock_waits': '1', + 'log_min_duration_statement': '5000', + 'log_rotation_age': '1440', + 'log_rotation_size': '102400', + 'random_page_cost': '1', + 'track_activity_query_size': '16384', + 'idle_in_transaction_session_timeout': '7200000', + }, + }), +}); +globaldbM.rdsCluster.connections.allowDefaultPortFrom(ec2.Peer.ipv4(`${process.env.MYIP}/32`)) + +const stackS = new Stack(mockApp, 'testing-stackS',{env: envSingapro}); +const vpcPublic2 = new ec2.Vpc(stackS,'defaultVpc2',{ + natGateways: 0, + maxAzs: 3, + subnetConfiguration: [{ + cidrMask: 26, + name: 'secondVPC2', + subnetType: ec2.SubnetType.PUBLIC, + }], +}); +const globaldbS = new GlobalAuroraRDSSlaveInfra(stackS, 'slaveregion',{ + vpc: vpcPublic2,subnetType:ec2.SubnetType.PUBLIC, +}); + +stackM.addDependency(stackS) + + +new CfnOutput(stackM, 'password', { value: globaldbM.rdsPassword }); +// add second region cluster +globaldbM.addRegionalCluster(stackM,'addregionalrds',{ + region: 'ap-southeast-1', + dbSubnetGroupName: globaldbS.dbSubnetGroup.dbSubnetGroupName, +}); +``` + +### To deploy + +```bash +cdk deploy +``` + +### To destroy + +```bash +cdk destroy +``` + +## :clap: Supporters + +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/stargazers) +[](https://github.com/neilkuan/cdk-aurora-globaldatabase/network/members) + + +%prep +%autosetup -n cdk-aurora-globaldatabase-2.3.114 + +%build +%py3_build + +%install +%py3_install +install -d -m755 %{buildroot}/%{_pkgdocdir} +if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi +if [ -d docs 
]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi +if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi +if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi +pushd %{buildroot} +if [ -d usr/lib ]; then + find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst +fi +if [ -d usr/lib64 ]; then + find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst +fi +if [ -d usr/bin ]; then + find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst +fi +if [ -d usr/sbin ]; then + find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst +fi +touch doclist.lst +if [ -d usr/share/man ]; then + find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst +fi +popd +mv %{buildroot}/filelist.lst . +mv %{buildroot}/doclist.lst . + +%files -n python3-cdk-aurora-globaldatabase -f filelist.lst +%dir %{python3_sitelib}/* + +%files help -f doclist.lst +%{_docdir}/* + +%changelog +* Tue Apr 11 2023 Python_Bot <Python_Bot@openeuler.org> - 2.3.114-1 +- Package Spec generated |
