diff --git a/modules/machinery/az.py b/modules/machinery/az.py index a8f5edeed16..a5d9971cc6d 100644 --- a/modules/machinery/az.py +++ b/modules/machinery/az.py @@ -34,6 +34,7 @@ from azure.identity import CertificateCredential, ClientSecretCredential from azure.mgmt.compute import ComputeManagementClient, models from azure.mgmt.network import NetworkManagementClient + from azure.core.exceptions import ResourceNotFoundError from msrest.polling import LROPoller HAVE_AZURE = True @@ -334,7 +335,8 @@ def _process_pre_existing_vmsss(self): # Cuckoo (AUTO_SCALE_CAPE key-value pair), ignore if not vmss.tags or not vmss.tags.get(Azure.AUTO_SCALE_CAPE_KEY) == Azure.AUTO_SCALE_CAPE_VALUE: # Ignoring... unless! They have one of the required names of the VMSSs that we are going to create - if vmss.name in self.required_vmsss.keys(): + if vmss.name in self.required_vmsss.keys() and not self.options.az.just_start: + log.info("Deleting VMSS %s as it is incorrectly configured and just_start is false", vmss.name) async_delete_vmss = Azure._azure_api_call( self.options.az.sandbox_resource_group, vmss.name, @@ -385,9 +387,10 @@ def _process_pre_existing_vmsss(self): operation=self.compute_client.virtual_machine_scale_sets.begin_update, ) _ = self._handle_poller_result(update_vmss_image) - elif not self.options.az.multiple_capes_in_sandbox_rg: + elif not self.options.az.multiple_capes_in_sandbox_rg and not self.options.az.just_start: # VMSS does not have the required name but has the tag that we associate with being a # correct VMSS + log.info("Deleting VMSS %s as multiple_capes_in_sandbox_rg is false and just_start is false", vmss.name) Azure._azure_api_call( self.options.az.sandbox_resource_group, vmss.name, @@ -695,77 +698,85 @@ def _add_machines_to_db(self, vmss_name): ) # Turn the Paged result into a list - vmss_vm_nics = [vmss_vm_nic for vmss_vm_nic in paged_vmss_vm_nics] + try: + vmss_vm_nics = [vmss_vm_nic for vmss_vm_nic in paged_vmss_vm_nics] + except ResourceNotFoundError: 
+ log.debug("No network interfaces found for VMSS %s (capacity=0)", vmss_name) + vmss_vm_nics = [] # This will be used if we are in the initializing phase of the system ready_vmss_vm_threads = {} with vms_currently_being_deleted_lock: vms_to_avoid_adding = vms_currently_being_deleted - for vmss_vm in paged_vmss_vms: - if vmss_vm.name in db_machine_labels: - # Don't add it if it already exists! - continue - if vmss_vm.name in vms_to_avoid_adding: - # Don't add it if it is currently being deleted! - log.debug("%s is currently being deleted!", vmss_vm.name) - continue - # According to Microsoft, the OS type is... - platform = vmss_vm.storage_profile.os_disk.os_type.lower() - if not vmss_vm.network_profile: - log.error("%s does not have a network profile", vmss_vm.name) - continue + try: + for vmss_vm in paged_vmss_vms: + if vmss_vm.name in db_machine_labels: + # Don't add it if it already exists! + continue + if vmss_vm.name in vms_to_avoid_adding: + # Don't add it if it is currently being deleted! + log.debug("%s is currently being deleted!", vmss_vm.name) + continue + # According to Microsoft, the OS type is... + platform = vmss_vm.storage_profile.os_disk.os_type.lower() - vmss_vm_nic = next( - ( - vmss_vm_nic - for vmss_vm_nic in vmss_vm_nics - if vmss_vm.network_profile.network_interfaces[0].id.lower() == vmss_vm_nic.id.lower() - ), - None, - ) - if not vmss_vm_nic: - log.error( - "%s does not match any NICs in %s", vmss_vm.network_profile.network_interfaces[0].id.lower(), str([vmss_vm_nic.id.lower() for vmss_vm_nic in vmss_vm_nics]) - ) - continue - # Sets "new_machine" object in configuration object to - # avoid raising an exception. 
- setattr(self.options, vmss_vm.name, {}) - - private_ip = vmss_vm_nic.ip_configurations[0].private_ip_address - if private_ip in db_machine_ips: - existing_machines = [machine for machine in machines_in_db if machine.ip == private_ip] - vmss_name, _ = existing_machines[0].label.split("_") - self._delete_machines_from_db_if_missing(vmss_name) - - # Add machine to DB. - # TODO: What is the point of name vs label? - self.db.add_machine( - name=vmss_vm.name, - label=vmss_vm.name, - ip=private_ip, - platform=platform, - tags=self.options.az.scale_sets[vmss_name].pool_tag, - arch=self.options.az.scale_sets[vmss_name].arch, - interface=self.options.az.interface, - snapshot=vmss_vm.storage_profile.image_reference.id, - resultserver_ip=self.options.az.resultserver_ip, - resultserver_port=self.options.az.resultserver_port, - reserved=False, - ) - # We always wait for Cuckoo agent to finish setting up if 'wait_for_agent_before_starting' is true or if we are initializing. - # Else, the machine should become immediately available in DB. - if self.initializing or self.options.az.wait_for_agent_before_starting: - thr = threading.Thread( - target=Azure._thr_wait_for_ready_machine, - args=( - vmss_vm.name, - private_ip, + if not vmss_vm.network_profile: + log.error("%s does not have a network profile", vmss_vm.name) + continue + + vmss_vm_nic = next( + ( + vmss_vm_nic + for vmss_vm_nic in vmss_vm_nics + if vmss_vm.network_profile.network_interfaces[0].id.lower() == vmss_vm_nic.id.lower() ), + None, ) - ready_vmss_vm_threads[vmss_vm.name] = thr - thr.start() + if not vmss_vm_nic: + log.error( + "%s does not match any NICs in %s", vmss_vm.network_profile.network_interfaces[0].id.lower(), str([vmss_vm_nic.id.lower() for vmss_vm_nic in vmss_vm_nics]) + ) + continue + # Sets "new_machine" object in configuration object to + # avoid raising an exception. 
+ setattr(self.options, vmss_vm.name, {}) + + private_ip = vmss_vm_nic.ip_configurations[0].private_ip_address + if private_ip in db_machine_ips: + existing_machines = [machine for machine in machines_in_db if machine.ip == private_ip] + vmss_name, _ = existing_machines[0].label.split("_") + self._delete_machines_from_db_if_missing(vmss_name) + + # Add machine to DB. + # TODO: What is the point of name vs label? + self.db.add_machine( + name=vmss_vm.name, + label=vmss_vm.name, + ip=private_ip, + platform=platform, + tags=self.options.az.scale_sets[vmss_name].pool_tag, + arch=self.options.az.scale_sets[vmss_name].arch, + interface=self.options.az.interface, + snapshot=vmss_vm.storage_profile.image_reference.id, + resultserver_ip=self.options.az.resultserver_ip, + resultserver_port=self.options.az.resultserver_port, + reserved=False, + ) + # We always wait for Cuckoo agent to finish setting up if 'wait_for_agent_before_starting' is true or if we are initializing. + # Else, the machine should become immediately available in DB. 
+ if self.initializing or self.options.az.wait_for_agent_before_starting: + thr = threading.Thread( + target=Azure._thr_wait_for_ready_machine, + args=( + vmss_vm.name, + private_ip, + ), + ) + ready_vmss_vm_threads[vmss_vm.name] = thr + thr.start() + except ResourceNotFoundError: + log.debug("No VMs found for VMSS %s (capacity=0)", vmss_name) if ready_vmss_vm_threads: for vm, thr in ready_vmss_vm_threads.items(): @@ -807,7 +818,11 @@ def _delete_machines_from_db_if_missing(self, vmss_name): ) # Turn the Paged result into a list - vmss_vm_names = [vmss_vm.name for vmss_vm in paged_vmss_vms] + try: + vmss_vm_names = [vmss_vm.name for vmss_vm in paged_vmss_vms] + except ResourceNotFoundError: + log.debug("No VMs found for VMSS %s (capacity=0)", vmss_name) + vmss_vm_names = [] for machine in self.db.list_machines(): # If machine entry in database is part of VMSS but machine in VMSS does not exist, delete @@ -907,109 +922,111 @@ def _thr_create_vmss(self, vmss_name, vmss_image_ref, vmss_image_os): @param vmss_image_os: The platform of the image @param vmss_tag: the tag used that represents the OS image """ - try: - self.subnet_id = Azure._azure_api_call( - self.options.az.vnet_resource_group, - self.options.az.vnet, - self.options.az.subnet, - operation=self.network_client.subnets.get, - ).id # note the id attribute here - except CuckooMachineError: - raise CuckooCriticalError( - "Subnet '%s' does not exist in Virtual Network '%s'", self.options.az.subnet, self.options.az.vnet - ) + try: + self.subnet_id = Azure._azure_api_call( + self.options.az.vnet_resource_group, + self.options.az.vnet, + self.options.az.subnet, + operation=self.network_client.subnets.get, + ).id # note the id attribute here + except CuckooMachineError: + raise CuckooCriticalError( + "Subnet '%s' does not exist in Virtual Network '%s'", self.options.az.subnet, self.options.az.vnet + ) - vmss_managed_disk = models.VirtualMachineScaleSetManagedDiskParameters( - 
storage_account_type=self.options.az.storage_account_type - ) - vmss_os_disk = models.VirtualMachineScaleSetOSDisk( - create_option="FromImage", - os_type=vmss_image_os, - managed_disk=vmss_managed_disk, - # Ephemeral disk time - caching="ReadOnly", - diff_disk_settings=models.DiffDiskSettings(option="Local", placement=self.options.az.ephemeral_os_disk_placement), - ) - vmss_storage_profile = models.VirtualMachineScaleSetStorageProfile( - image_reference=vmss_image_ref, - os_disk=vmss_os_disk, - ) - vmss_dns_settings = models.VirtualMachineScaleSetNetworkConfigurationDnsSettings( - dns_servers=self.options.az.dns_server_ips.strip().split(",") - ) - vmss_ip_config = models.VirtualMachineScaleSetIPConfiguration( - name="vmss_ip_config", - subnet=models.ApiEntityReference(id=self.subnet_id), - private_ip_address_version="IPv4", - ) - vmss_network_config = models.VirtualMachineScaleSetNetworkConfiguration( - name="vmss_network_config", - dns_settings=vmss_dns_settings, - ip_configurations=[vmss_ip_config], - primary=True, - ) - vmss_network_profile = models.VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[vmss_network_config]) - # If the user wants spot instances, then give them spot instances! - if self.options.az.spot_instances: - vmss_vm_profile = models.VirtualMachineScaleSetVMProfile( - storage_profile=vmss_storage_profile, - network_profile=vmss_network_profile, - # Note: The following key value pairs are for Azure spot instances - priority=models.VirtualMachinePriorityTypes.spot, - eviction_policy=models.VirtualMachineEvictionPolicyTypes.delete, - # Note: This value may change depending on your needs. 
- billing_profile=models.BillingProfile(max_price=float(-1)), + vmss_managed_disk = models.VirtualMachineScaleSetManagedDiskParameters( + storage_account_type=self.options.az.storage_account_type ) - else: - vmss_vm_profile = models.VirtualMachineScaleSetVMProfile( - storage_profile=vmss_storage_profile, - network_profile=vmss_network_profile, - priority=models.VirtualMachinePriorityTypes.REGULAR, + vmss_os_disk = models.VirtualMachineScaleSetOSDisk( + create_option="FromImage", + os_type=vmss_image_os, + managed_disk=vmss_managed_disk, + # Ephemeral disk time + caching="ReadOnly", + diff_disk_settings=models.DiffDiskSettings(option="Local", placement=self.options.az.ephemeral_os_disk_placement), ) - vmss = models.VirtualMachineScaleSet( - location=self.options.az.region_name, - tags=Azure.AUTO_SCALE_CAPE_TAG, - sku=models.Sku(name=self.options.az.instance_type, capacity=self.required_vmsss[vmss_name]["initial_pool_size"]), - upgrade_policy=models.UpgradePolicy(mode="Automatic"), - virtual_machine_profile=vmss_vm_profile, - overprovision=False, - # When true this limits the scale set to a single placement group, of max size 100 virtual machines. 
- single_placement_group=False, - scale_in_policy=models.ScaleInPolicy(rules=[models.VirtualMachineScaleSetScaleInRules.newest_vm]), - spot_restore_policy=( - models.SpotRestorePolicy(enabled=True, restore_timeout="PT30M") if self.options.az.spot_instances else None - ), - ) - if not self.options.az.just_start: - async_vmss_creation = Azure._azure_api_call( - self.options.az.sandbox_resource_group, - vmss_name, - vmss, - polling_interval=1, - operation=self.compute_client.virtual_machine_scale_sets.begin_create_or_update, + vmss_storage_profile = models.VirtualMachineScaleSetStorageProfile( + image_reference=vmss_image_ref, + os_disk=vmss_os_disk, ) - _ = self._handle_poller_result(async_vmss_creation) - - # Initialize key-value pair for VMSS with specific details - machine_pools[vmss_name] = { - "size": self.required_vmsss[vmss_name]["initial_pool_size"], - "is_scaling": False, - "is_scaling_down": False, - "wait": False, - } - self.required_vmsss[vmss_name]["exists"] = True - try: - with self.db.session.begin(): + vmss_dns_settings = models.VirtualMachineScaleSetNetworkConfigurationDnsSettings( + dns_servers=self.options.az.dns_server_ips.strip().split(",") + ) + vmss_ip_config = models.VirtualMachineScaleSetIPConfiguration( + name="vmss_ip_config", + subnet=models.ApiEntityReference(id=self.subnet_id), + private_ip_address_version="IPv4", + ) + vmss_network_config = models.VirtualMachineScaleSetNetworkConfiguration( + name="vmss_network_config", + dns_settings=vmss_dns_settings, + ip_configurations=[vmss_ip_config], + primary=True, + ) + vmss_network_profile = models.VirtualMachineScaleSetNetworkProfile(network_interface_configurations=[vmss_network_config]) + # If the user wants spot instances, then give them spot instances! 
+ if self.options.az.spot_instances: + vmss_vm_profile = models.VirtualMachineScaleSetVMProfile( + storage_profile=vmss_storage_profile, + network_profile=vmss_network_profile, + # Note: The following key value pairs are for Azure spot instances + priority=models.VirtualMachinePriorityTypes.spot, + eviction_policy=models.VirtualMachineEvictionPolicyTypes.delete, + # Note: This value may change depending on your needs. + billing_profile=models.BillingProfile(max_price=float(-1)), + ) + else: + vmss_vm_profile = models.VirtualMachineScaleSetVMProfile( + storage_profile=vmss_storage_profile, + network_profile=vmss_network_profile, + priority=models.VirtualMachinePriorityTypes.REGULAR, + ) + vmss = models.VirtualMachineScaleSet( + location=self.options.az.region_name, + tags=Azure.AUTO_SCALE_CAPE_TAG, + sku=models.Sku(name=self.options.az.instance_type, capacity=self.required_vmsss[vmss_name]["initial_pool_size"]), + upgrade_policy=models.UpgradePolicy(mode="Automatic"), + virtual_machine_profile=vmss_vm_profile, + overprovision=False, + # When true this limits the scale set to a single placement group, of max size 100 virtual machines. 
+ single_placement_group=False, + scale_in_policy=models.ScaleInPolicy(rules=[models.VirtualMachineScaleSetScaleInRules.newest_vm]), + spot_restore_policy=( + models.SpotRestorePolicy(enabled=True, restore_timeout="PT30M") if self.options.az.spot_instances else None + ), + ) + if not self.options.az.just_start: + async_vmss_creation = Azure._azure_api_call( + self.options.az.sandbox_resource_group, + vmss_name, + vmss, + polling_interval=1, + operation=self.compute_client.virtual_machine_scale_sets.begin_create_or_update, + ) + _ = self._handle_poller_result(async_vmss_creation) + + # Initialize key-value pair for VMSS with specific details + machine_pools[vmss_name] = { + "size": self.required_vmsss[vmss_name]["initial_pool_size"], + "is_scaling": False, + "is_scaling_down": False, + "wait": False, + } + self.required_vmsss[vmss_name]["exists"] = True + try: + with self.db.session.begin(): + if machine_pools[vmss_name]["size"] == 0: + self._insert_placeholder_machine(vmss_name, self.required_vmsss[vmss_name]) + else: + self._add_machines_to_db(vmss_name) + except sqlalchemy.exc.InvalidRequestError: if machine_pools[vmss_name]["size"] == 0: self._insert_placeholder_machine(vmss_name, self.required_vmsss[vmss_name]) else: self._add_machines_to_db(vmss_name) - except sqlalchemy.exc.InvalidRequestError: - if machine_pools[vmss_name]["size"] == 0: - self._insert_placeholder_machine(vmss_name, self.required_vmsss[vmss_name]) - else: - self._add_machines_to_db(vmss_name) + except Exception as e: + log.exception("Exception in _thr_create_vmss for %s: %s", vmss_name, e) def _thr_reimage_vmss(self, vmss_name): """ @@ -1495,7 +1512,7 @@ def _thr_reimage_list_reader(self): current_vmss_operations -= 1 timediff = timeit.default_timer() - start_time log.debug( - "%successfully reimaging instances %s in %s took %ds", {'S' if reimaged else 'Uns'}, str(instance_ids), str(vmss_to_reimage), round(timediff) + "%ssuccessfully reimaging instances %s in %s took %ds", 'S' if reimaged 
else 'Un', str(instance_ids), str(vmss_to_reimage), round(timediff) ) except Exception as e: log.error("Exception occurred in the reimage thread: %s. Trying again...", str(e)) @@ -1572,7 +1589,7 @@ def _thr_delete_list_reader(self): with current_operations_lock: current_vmss_operations -= 1 log.debug( - "%successfully deleting instances %s in {vmss_to_delete_from} took %ss", 'S' if deleted else 'Uns', str(instance_ids), str(round(timeit.default_timer() - start_time)) + "%ssuccessfully deleting instances %s in %s took %ss", 'S' if deleted else 'Un', str(instance_ids), str(vmss_to_delete_from), str(round(timeit.default_timer() - start_time)) ) except Exception as e: log.error("Exception occurred in the delete thread: %s. Trying again...", str(e)) diff --git a/web/templates/account/email.html b/web/templates/account/email.html index d5fe78bac90..189ada18778 100644 --- a/web/templates/account/email.html +++ b/web/templates/account/email.html @@ -33,7 +33,7 @@

{% trans "E-mail Addresses" %}

- +
@@ -50,7 +50,7 @@

{% trans "Add E-mail Address" %}

{% csrf_token %} {{ form|crispy }} - +
{% endif %} diff --git a/web/templates/account/email_confirm.html b/web/templates/account/email_confirm.html index df9f3f7c6e8..4775860d2d7 100644 --- a/web/templates/account/email_confirm.html +++ b/web/templates/account/email_confirm.html @@ -17,7 +17,7 @@

{% trans "Confirm E-mail Address" %}

{% csrf_token %} - +
{% else %} diff --git a/web/templates/account/password_change.html b/web/templates/account/password_change.html index 70669f1da2c..8bc4549a265 100644 --- a/web/templates/account/password_change.html +++ b/web/templates/account/password_change.html @@ -9,7 +9,7 @@

{% trans "Change Password" %}

{% csrf_token %} {{ form|crispy }} - + {% trans "Forgot Password?" %}
{% endblock %} diff --git a/web/templates/account/password_reset.html b/web/templates/account/password_reset.html index 8d208bf866a..439dbeb187e 100644 --- a/web/templates/account/password_reset.html +++ b/web/templates/account/password_reset.html @@ -17,7 +17,7 @@

{% trans "Password Reset" %}

{% csrf_token %} {{ form|crispy }} - +

{% blocktrans %}Please contact us if you have any trouble resetting your password.{% endblocktrans %}

diff --git a/web/templates/account/password_reset_from_key.html b/web/templates/account/password_reset_from_key.html index b386fa5197c..4adc7c8a116 100644 --- a/web/templates/account/password_reset_from_key.html +++ b/web/templates/account/password_reset_from_key.html @@ -13,7 +13,7 @@

{% if token_fail %}{% trans "Bad Token" %}{% else %}{% trans "Change Passwor
{% csrf_token %} {{ form|crispy }} - +
{% else %}

{% trans 'Your password is now changed.' %}

diff --git a/web/templates/analysis/behavior/_processes.html b/web/templates/analysis/behavior/_processes.html index 17a6c9f98e4..2530c78b4ac 100644 --- a/web/templates/analysis/behavior/_processes.html +++ b/web/templates/analysis/behavior/_processes.html @@ -153,7 +153,7 @@

{{process.process_name}}
- +
+ diff --git a/web/templates/analysis/search.html b/web/templates/analysis/search.html index 27d57beb7f0..a1acc17264e 100644 --- a/web/templates/analysis/search.html +++ b/web/templates/analysis/search.html @@ -7,7 +7,7 @@
- +
diff --git a/web/templates/submission/index.html b/web/templates/submission/index.html index e532bc749ea..2d762af4652 100644 --- a/web/templates/submission/index.html +++ b/web/templates/submission/index.html @@ -282,18 +282,265 @@
Advance
- +
- +
-
- Common options: free=1 (no monitor), - full-logs=1, norefer=1. - See docs for full list. +
+

Syntax is option1=val1,option2=val2,option3=val3, etc.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OptionDescription
filenameRename the sample file
nameThis will force family extractor to run, Ex: name=trickbot
curdirChange from where execute sample, by default %TEMP%, Ex: curdir=%APPDATA% or + curdir=%APPDATA%\Microsoft\Windows\Start Menu\Programs\Startup +
executiondirSets directory to launch the file from. Need not be the same as the directory of sample file. Defaults to %TEMP% if both executiondir and curdir are not specified. Only supports full paths
freeRun without monitoring (disables many capabilities) Ex: free=1
force-sleepskipOverride default sleep skipping behavior: 0 disables all sleep skipping, 1 skips all sleeps.
full-logsBy default, logs prior to network activity for URL analyses and prior to access of the file in question for non-executable formats are suppressed. Set to 1 to disable log suppression.
force-flushFor performance reasons, logs are buffered before being sent back to the result server. We make every attempt to flush the buffer at critical points including when exceptions occur, but in some rare termination scenarios, logs may be lost. Set to 1 to force flushing of the log buffers after any non-duplicate API is called, set to 2 to force flushing of every log.
no-stealthSet to 1 to disable anti-anti-VM/sandbox code enabled by default.
buffer-maxWhen set to an integer of your choice, changes the maximum number of bytes that can be logged for most API buffers.
large-buffer-maxSome hooked APIs permit larger buffers to be logged. To change the limit for this, set this to an integer of your choice.
noreferDisables use of a fake referrer when performing URL analyses
fileWhen using the zip or rar package, set the name of the file to execute
passwordWhen using the zip or rar package, set the password to use for extraction. Also used when analyzing password-protected Office documents.
functionWhen using the dll package, set the name of the exported function/ordinal to execute. Can be multiple functions/ordinals split by colons. Ex: function=func1:func2
dllloaderWhen using the dll package, set the name of the process loading the DLL (defaults to rundll32.exe).
argumentsWhen using the dll, exe, or python packages, set the arguments to be passed to the executable or exported function.
appdataWhen using the exe package, set to 1 to run the executable out of the Application Data path instead of the Temp directory.
startbrowserSetting this option to 1 will launch a browser 30 seconds into the analysis (useful for some banking trojans).
browserdelaySets the number of seconds to wait before starting the browser with the startbrowser option. Defaults to 30 seconds.
urlWhen used with the startbrowser option, this will determine the URL the started browser will access.
debugSet to 1 to enable reporting of critical exceptions occurring during analysis, set to 2 to enable reporting of all exceptions.
disable_hook_contentSet to 1 to remove functionality of all hooks except those critical for monitoring other processes. Set to 2 to apply to all hooks.
hook-typeValid for 32-bit analyses only. Specifies the hook type to use: direct, indirect, or safe. Safe attempts a Detours-style hook.
serialSpoof the serial of the system volume as the provided hex value
single-processWhen set to 1 this will limit behaviour monitoring to the initial process only.
exclude-apisExclude the colon-separated list of APIs from being hooked
exclude-dllsExclude the colon-separated list of DLLs from being hooked
dropped-limitOverride the default dropped file limit of 100 files
compressionWhen set to 1 this will enable CAPE's extraction of compressed payloads
extractionWhen set to 1 this will enable CAPE's extraction of payloads from within each process
injectionWhen set to 1 this will enable CAPE's capture of injected payloads between processes
comboThis combines compression, injection and extraction with process dumps
dump-on-apiDump the calling module when a function from the colon-separated list of APIs is used
bp0Sets breakpoint 0 (processor/hardware) to a VA or RVA value (or module::export). Applies also to bp1-bp3.
file-offsetsBreakpoints in bp0-bp3 will be interpreted as PE file offsets rather than RVAs
break-on-returnSets breakpoints on the return address(es) from a colon-separated list of APIs
base-on-apiSets the base address to which breakpoints will be applied (and sets breakpoints)
depthSets the depth an instruction trace will step into (defaults to 0, requires Trace package)
countSets the number of instructions in a trace (defaults to 128, requires Trace package)
referrerSpecify the referrer to be used for URL tasks, overriding the default Google referrer
loop_detectionSet this option to 1 to enable loop detection (compress call logs - behavior analysis)
staticCheck if config can be extracted statically, if not, send to vm
Dl&Exec add headers examplednl_user_agent: "CAPE Sandbox", dnl_referer: google
servicedesc - for service packageService description
arguments - for service packageService arguments
store_memdumpWill force STORE memdump, only when submitting to analyzer node directly, as distributed cluster can modify this
pre_script_argsCommand line arguments for pre_script. Example: pre_script_args=file1 file2 file3
pre_script_timeoutpre_script_timeout will default to 60 seconds. Script will stop after timeout. Example: pre_script_timeout=30
during_script_argsCommand line arguments for during_script. Example: during_script_args=file1 file2 file3
ignore_size_checkAllow ignore file size, must be enabled in conf/web.conf
pwshWhen using the ps1 package, prefer PowerShell Core (pwsh.exe) if available (defaults to powershell.exe)
unpackerEx: unpacker=2 - Add description here
check_shellcodeSetting check_shellcode=0 will disable checking for shellcode during package identification and extracting from archive
unhook-apisCapability to dynamically unhook previously hooked functions (unhook-apis option takes colon-separated list e.g. unhook-apis=NtSetInformationThread:NtDelayExecution)
ttdttd=1. TTD integration (Microsoft Time Travel Debugging). Requires binaries to be placed in correct folder
polarproxyRun PolarProxy to generate PCAP with decrypted TLS streams. Ex: polarproxy=1
tlsportTLS port for PolarProxy to MITM (Default: 443). Ex: tlsport=10443
mitmdumpRun mitmdump to generate HAR with decrypted TLS streams. Ex: mitmdump=1
+
@@ -482,7 +729,7 @@
Advance
-