Skip to content

Commit

Permalink
Merge pull request #6 from rebbuh/main
Browse files Browse the repository at this point in the history
Adds support for lifecycle rules
  • Loading branch information
swoehrl-mw authored Jan 12, 2023
2 parents d44ec61 + 945c152 commit c2cea1e
Show file tree
Hide file tree
Showing 3 changed files with 74 additions and 4 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,11 @@ spec:
retentionPeriodInDays: 1 # Days to keep deleted data, optional
backup:
enabled: false # Override the default backup strategy configured in the global operator config
lifecycle: # Define rules which determine the lifecycle of blob resources, optional
rules:
- name: foobar-rule # Lifecycle rule name, optional
blobPrefix: foobar # Prefix of blob resources to apply rule to, required
deleteDaysAfterModification: 30 # Delete blob resources after number of days after last modification, required
containers: # Only relevant for azure, list of containers to create in the bucket, for azure at least one is required, containers not on the list will be removed from the storage account, including their data
- name: assets # Name of the container, required
anonymousAccess: false # If set to true objects in the container can be accessed without authentication/authorization, only relevant if `security.anonymousAccess` is set to true, optional
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,23 @@ spec:
properties:
enabled:
type: boolean
lifecycle:
type: object
properties:
rules:
type: array
items:
type: object
properties:
name:
type: string
blobPrefix:
type: string
deleteDaysAfterModification:
type: number
required:
- blobPrefix
- deleteDaysAfterModification
containers:
type: array
items:
Expand Down
56 changes: 52 additions & 4 deletions hybridcloud/backends/azureblob.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,9 @@
BlobServiceProperties, \
CorsRules, CorsRule, NetworkRuleSet, IPRule, VirtualNetworkRule, BlobContainer, \
StorageAccountCheckNameAvailabilityParameters, StorageAccountRegenerateKeyParameters, \
DeleteRetentionPolicy, RestorePolicyProperties, ChangeFeed, LocalUser, PermissionScope, SshPublicKey
DeleteRetentionPolicy, RestorePolicyProperties, ChangeFeed, LocalUser, PermissionScope, SshPublicKey, \
ManagementPolicy, ManagementPolicySchema, ManagementPolicyRule, RuleType, ManagementPolicyDefinition, \
ManagementPolicyAction, ManagementPolicyFilter, ManagementPolicyBaseBlob, DateAfterModification
from azure.mgmt.resource.locks.models import ManagementLockObject
from azure.mgmt.dataprotection.models import BackupInstanceResource, BackupInstance, PolicyInfo, Datasource
from ..util.azure import azure_client_storage, azure_client_locks, azure_backup_client
Expand All @@ -15,6 +17,7 @@
TAGS_PREFIX = "hybridcloud-object-storage-operator"
HTTP_METHODS = ["DELETE", "GET", "HEAD", "MERGE", "OPTIONS", "PATCH", "POST", "PUT"]
SFTP_USER_PERMISSIONS = ["READ", "WRITE", "DELETE", "LIST", "CREATE"]
LIFECYCLE_MANAGEMENT_POLICY_NAME = "default"


def _backend_config(key, default=None, fail_if_missing=False):
Expand Down Expand Up @@ -204,9 +207,9 @@ def create_or_update_bucket(self, namespace, name, spec):
if existing_username not in users_from_spec:
self._storage_client.local_users.delete(self._resource_group, bucket_name, existing_username)

if backup_enabled:
storage_account = self._storage_client.storage_accounts.get_properties(self._resource_group, bucket_name)
storage_account = self._storage_client.storage_accounts.get_properties(self._resource_group, bucket_name)

if backup_enabled:
vault_name = _backend_config("backup.vault_name", fail_if_missing=True)
policy_name = _backend_config("backup.policy_name", fail_if_missing=True)

Expand Down Expand Up @@ -235,6 +238,31 @@ def create_or_update_bucket(self, namespace, name, spec):
parameters=backup_properties
).result()

lifecycle_policy = self._map_lifecycle_policy(spec)
if lifecycle_policy is not None:
self._storage_client.management_policies.create_or_update(
resource_group_name=self._resource_group,
account_name=storage_account.name,
management_policy_name=LIFECYCLE_MANAGEMENT_POLICY_NAME,
properties=lifecycle_policy
)
else:
try:
# The following call will yield a ResourceNotFoundError iff the policy does not exist,
# hence we will not try to delete it
self._storage_client.management_policies.get(
resource_group_name=self._resource_group,
account_name=storage_account.name,
management_policy_name=LIFECYCLE_MANAGEMENT_POLICY_NAME,
)
self._storage_client.management_policies.delete(
resource_group_name=self._resource_group,
account_name=storage_account.name,
management_policy_name=LIFECYCLE_MANAGEMENT_POLICY_NAME,
)
except ResourceNotFoundError:
pass

# Credentials
for key in self._storage_client.storage_accounts.list_keys(self._resource_group, bucket_name).keys:
if key.key_name == "key1":
Expand All @@ -246,7 +274,6 @@ def create_or_update_bucket(self, namespace, name, spec):
}
raise Exception("Could not find keys in azure")


def delete_bucket(self, namespace, name):
bucket_name = _calc_name(namespace, name)
delete_fake = _backend_config("delete_fake", default=False)
Expand Down Expand Up @@ -296,6 +323,27 @@ def _map_network_rules(self, spec, public_access):
default_action="Allow" if public_access else "Deny"
)

def _map_lifecycle_policy(self, spec):
    """Build an Azure ManagementPolicy from the bucket spec's lifecycle rules.

    Reads `lifecycle.rules` from the spec. Each rule must contain
    `blobPrefix` and `deleteDaysAfterModification` (enforced by the CRD
    schema); `name` is optional and defaults to a positional name.

    Returns:
        ManagementPolicy with one LIFECYCLE rule per spec entry, each
        deleting block blobs matching the rule's prefix N days after last
        modification; or None when the spec defines no lifecycle rules
        (callers use None as the signal to delete any existing policy).
    """
    spec_rules = field_from_spec(spec, "lifecycle.rules", [])
    if not spec_rules:
        return None
    lifecycle_rules = []
    for index, rule in enumerate(spec_rules):
        # Rule names must be unique within a policy; fall back to a
        # positional name when the spec omits one.
        rule_name = rule.get("name", f"rule-{index}")
        delete_action = ManagementPolicyAction(
            base_blob=ManagementPolicyBaseBlob(
                delete=DateAfterModification(
                    days_after_modification_greater_than=rule["deleteDaysAfterModification"])))
        # Only blockBlob is targeted; prefix_match scopes the rule to the
        # configured blob prefix.
        rule_filter = ManagementPolicyFilter(
            blob_types=["blockBlob"],
            prefix_match=[rule["blobPrefix"]])
        lifecycle_rules.append(ManagementPolicyRule(
            name=rule_name,
            type=RuleType.LIFECYCLE,
            enabled=True,
            definition=ManagementPolicyDefinition(
                actions=delete_action,
                filters=rule_filter)))
    return ManagementPolicy(policy=ManagementPolicySchema(rules=lifecycle_rules))

def _get_backup_lock(self, bucket_name):
try:
return self._lock_client.management_locks.get_at_resource_level(
Expand Down

0 comments on commit c2cea1e

Please sign in to comment.