ServiceNow App for Red Hat Ansible Automation

Keyva announces the release of an open source version of its ServiceNow App that integrates with Red Hat Ansible using the Ansible Tower (or AWX) APIs. The integration allows users to trigger Ansible jobs from within ServiceNow Catalog Requests or Change tickets. Users can customize triggers to suit their own needs: not only launching the Ansible job from a specific ServiceNow application, but also defining the specific conditions (e.g. the Status field set to 'In Progress'). Many organizations use Ansible as the automation and orchestration layer while using ServiceNow as their ITSM suite and CMDB, and there are several common use cases that require an integration between the two offerings. [Fig 1a: Sample Provisioning Use Case] A similar use case can be implemented with this integration for Day 2 tasks like patching or unprovisioning. Customers looking to launch a service request through a centralized portal like ServiceNow, with Ansible as the orchestration fulfillment engine, can leverage this open sourced integration. Check out the sample provisioning use case (fig 1a) here. You can check out the integration on our GitHub repository here: https://github.com/keyva/ansible If you have any questions or feedback, please reach out to a Keyva associate at [email protected]
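Under the hood, integrations like this drive Ansible Tower/AWX through its REST API. As a rough sketch of the kind of call involved (the host, credentials, and job template ID 42 below are placeholders, not values from the app):

# Launch a Tower/AWX job template via the v2 REST API (illustrative only)
curl -s -k -u admin:password -X POST https://tower.example.com/api/v2/job_templates/42/launch/

A ServiceNow business rule or workflow script can make the equivalent outbound REST call whenever a Catalog Request or Change ticket meets the configured conditions.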

DevOps and Data Warehousing

Many organizations have started applying DevOps practices and tools to data warehousing and data lake setups. Data analysts and database managers can follow DevOps practices to manage updates and new database releases across environments in a uniform fashion and produce repeatable results. Just as application teams create and manage CI/CD pipelines for applications, the data those applications consume can have its own release pipeline, managed by the database teams. In many cases, cloud-based data warehousing platforms can host the applications that consume the data within the same environment. Applications that consume data housed in a data warehouse may also leverage Kafka or other tools to achieve low-latency query performance. As you release updates to your applications, you may also need to account for updates to the service bus layer and the database layer, which makes continuous integration and continuous deployment all the more important.

Data teams that institute DevOps practices and tools for data warehousing can promote an agile culture within their silos. This includes the process of fetching or discovering data for the warehouse, ensuring it is current and accurate for the consuming applications, and organizing it for data mining and analysis. You can apply DevOps practices and policies to data automation just as you would to infrastructure automation, from self-service models for requesting new data instances, to requesting updates and other data lifecycle steps. Many organizations have built entire data platforms on containers. For infrastructure and database teams, it is imperative to provide data as a service with measured and tracked SLAs and costs, whether those services run on container platforms or otherwise. Public cloud platforms have made it easy for consumers to leverage SaaS data warehousing solutions. DevOps practices do not have to be limited to providing the underlying infrastructure or service; they can also be applied to building reports. Jenkins automation can be used to release database updates, integration tools can fetch the relevant data from multiple sources to populate the target systems, and open source tools like Grafana can be used for dashboards. The primary objective of such a setup is to capture data from the various components and locations in the environment into a centralized location via ETL, and to process that data into business intelligence. When bringing data in from multiple sources for data warehousing, the exercises of data mapping, reconciliation, and sanitization usually take the most time and effort upfront. Architectural considerations also include monitoring the data warehouse components as well as the data within them. Data processing engines like Hadoop MapReduce or Spark, along with the database serving platforms, form the core components of any data warehouse setup.
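To make the database release pipeline idea concrete, here is a minimal sketch of a pipeline step that applies versioned schema migrations. Flyway is my example choice and the connection details are placeholders; the post itself does not name a specific migration tool:

# Apply pending versioned migrations to the warehouse schema
flyway -url=jdbc:postgresql://warehouse-db:5432/warehouse \
  -user=deploy -locations=filesystem:./sql migrate

A Jenkins job can run this step after migration scripts pass review, giving database changes the same repeatable, auditable path to production as application code.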
By implementing a best-practices architecture and tuning specifically for your environment, you can optimize your data warehouse setup to achieve a balance between performance and cost. Industry use cases such as fraud prevention in banking, storing health records and doctors' notes in healthcare, customer profiling in retail, and real-time streaming in media have already leveraged the benefits of data lakes for capturing and storing unstructured data, and of data warehousing for structured data. With the adoption of blockchain technologies, the relevance of Big Data is only anticipated to grow. Most enterprises depend heavily on applications for their business and have adopted agile processes for application releases; combining the consumption of Big Data with an emphasis on extracting relevant, accurate data at the right time is paramount for business-critical applications. The adoption of DevOps practices and tools for data warehousing within data teams is still at a nascent stage, but more data experts pick it up every day.

If you need assistance with data warehousing to move disparate data from various sources, or need help assessing the feasibility of a data warehouse platform without substantially affecting your business-critical applications, Keyva can help. Associates at Keyva have worked with many organizations across verticals on data migration and application modernization projects. These include creating a data migration factory, creating ETL strategies with data mapping, refactoring existing applications, adding a wrapper over current applications so they can be consumed easily by DevOps processes, and modifying existing applications to consume data from SaaS platforms. If you'd like us to review your environment and suggest what might work for you, please contact us at [email protected].

How to set up Hadoop two node cluster and run MapReduce jobs

This write-up walks through setting up a two node Hadoop v3.1.1 cluster and running a couple of sample MapReduce jobs. Prerequisites:
Two machines set up with RHEL 7. You could use another distribution, but the commands may vary.
Perl, wget, and other required packages downloaded using yum
Disable the firewall, or open up connectivity between the two machines. Since we are setting this up as a lab instance, we will go ahead and disable the firewall.
hadoop1 will be the master node, and hadoop2 will be the datanode.
Add entries for hadoop1 and hadoop2 under /etc/hosts on both machines.

We will need a JDK installation (on both machines):
yum install java-1.8.0-openjdk -y
You can validate that Java is installed by querying the installed version:
java -version
Create a separate directory under / where we will download the Hadoop bits (on both machines):
mkdir /hadoop
cd /hadoop/
wget http://mirror.cc.columbia.edu/pub/software/apache/hadoop/common/hadoop-3.1.1/hadoop-3.1.1.tar.gz
tar -xzf hadoop-3.1.1.tar.gz
To point Hadoop at the correct Java installation, we need to capture the full path of the Java install:
readlink -f $(which java)
Export the path as an environment variable by modifying the bashrc profile, so that all the required environment variables are available when we log in to the machine console. Make this change on both machines:
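The export lines themselves are not reproduced in this post; a minimal sketch of the usual entries, assuming the install paths used in this guide (verify JAVA_HOME on your machines):

# JAVA_HOME: the readlink output from above, minus the trailing /bin/java
export JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk
export HADOOP_HOME=/hadoop/hadoop-3.1.1
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
# If running the Hadoop services as root, Hadoop 3 also expects these:
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root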
On the master node, update the workers file to reflect the slave nodes
vi /hadoop/hadoop-3.1.1/etc/hadoop/workers
Add the entry
hadoop2
And then on the master node, format the hdfs file system:
/hadoop/hadoop-3.1.1/bin/hdfs namenode -format
On the datanode, format the hdfs file system:
/hadoop/hadoop-3.1.1/bin/hdfs datanode -format
On the master node, start the dfs service:
/hadoop/hadoop-3.1.1/sbin/start-dfs.sh
On the master node, run the dfsadmin report to validate the availability of the datanodes:
/hadoop/hadoop-3.1.1/bin/hdfs dfsadmin -report
The output of this command should show two entries for datanodes, one for hadoop1 and one for hadoop2. The nodes are now set up to handle MapReduce jobs. We will look at two examples, using the sample jobs from the hadoop-mapreduce-examples-3.1.1.jar file under the share folder. There are a large number of open source Java projects available that run various kinds of MapReduce jobs. We will run these exercises on the master node.

Exercise 1: We will solve a sudoku puzzle using MapReduce. First, we need to create a sudoku directory under the root folder in the HDFS file system:
/hadoop/hadoop-3.1.1/bin/hdfs dfs -mkdir /sudoku
Then create an input file with the sudoku puzzle under your current directory:
vi solve_this.txt
Update the file with the puzzle text. Each entry on the same line is separated by a space, with ? marking an unsolved cell.
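The solver invocation is a plausible reconstruction, assuming the sudoku example shipped in the same jar and the paths used above:

# Run the sudoku example against the local puzzle file (reconstructed invocation)
/hadoop/hadoop-3.1.1/bin/hadoop jar /hadoop/hadoop-3.1.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar sudoku ./solve_this.txt
# Exercise 2 below reads the file from HDFS, so copy it there as well
/hadoop/hadoop-3.1.1/bin/hdfs dfs -put solve_this.txt /sudoku/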
The solver output ends with:
Found 1 solutions

Exercise 2: We will run a wordcount job on the sudoku puzzle file and have the output stored in the wcount_result folder:
/hadoop/hadoop-3.1.1/bin/hadoop jar /hadoop/hadoop-3.1.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar wordcount /sudoku/solve_this.txt /sudoku/wcount_result
The lengthy output lists the counters and statistics from the job's detailed analysis of the file. We can then cat the result files to inspect the word counts.
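The exact cat command is not preserved here; with wordcount's default output layout it would look like this (the part file name is an assumption):

# Print the reducer output; wordcount writes part-r-00000 by default
/hadoop/hadoop-3.1.1/bin/hdfs dfs -cat /sudoku/wcount_result/part-r-00000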
This output captures the total number of times each digit appears in the solved puzzle. To see all the sample methods available in hadoop-mapreduce-examples-3.1.1.jar, run the following command:
/hadoop/hadoop-3.1.1/bin/hadoop jar /hadoop/hadoop-3.1.1/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.1.jar
If you have any questions about the steps documented here, would like more information on the installation procedure, or have any feedback or requests, please let us know at [email protected].

Installation Guide: Ansible & Ansible Tower - Day 1

This technical guide walks you through the installation of Ansible v2.4.2.0, an open source configuration management and deployment tool, and Ansible Tower (the web layer for Ansible) v3.4.1 on a RHEL 7 virtual machine. Ansible Tower is the Red Hat supported, paid version of the open source AWX project. We will first enable the required repos.

You can install the latest version of Ansible using yum:
yum install ansible
(Since we will be installing Ansible Tower on this same machine, it is recommended to use the yum method to install Ansible.) Alternatively, you can build the RPM package by downloading the latest Ansible code from Git. If you choose this method, first get all the prerequisite libraries ready (some of these are optional):
mkdir ansible
cd ansible/
git clone https://github.com/ansible/ansible.git
systemctl stop firewalld
systemctl disable firewalld
cd ./ansible/
make rpm
rpm -Uvh ./rpm-build/ansible-*.noarch.rpm
Once installed, you can view and modify the default Ansible hosts file at /etc/ansible/hosts. You can also verify a successful installation using the command:
ansible --version
Now we can set up Ansible Tower on this machine. We will use the integrated installation, which installs the GUI, the REST API, and the database all on the same machine:
mkdir ansible-tower
cd ansible-tower/
wget https://releases.ansible.com/ansible-tower/setup-bundle/ansible-tower-setup-bundle-3.4.1-1.el7.tar.gz
tar xvzf ansible-tower-setup-bundle-3.4.1-1.el7.tar.gz
cd ansible-tower-setup-bundle-3.4.1-1.el7/
Tower connects to the PostgreSQL database using password authentication, so we need to create an md5 hash to configure Tower to talk to the database. Replace <CUSTOM-DB-PASSWORD> with a password of your choosing.
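The hash-generation command is not shown here; one way to produce a PostgreSQL-style md5 hash, assuming the default Tower database user awx (the scheme is 'md5' followed by the md5 of the password concatenated with the username):

# Produce a PostgreSQL md5 password hash (assumes the DB user is 'awx')
echo "md5$(echo -n '<CUSTOM-DB-PASSWORD>awx' | md5sum | awk '{print $1}')"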
Make a note of the hash generated by this command; we will use it in the next step. Now update the inventory file (located in the ansible-tower-setup-bundle-3.4.1-1.el7 directory) with the password for the database, the hash generated above, and a custom password of your choosing for rabbitmq. Find the following lines and update them accordingly. First, set the admin password for the console:
admin_password='AdminPassword'
Next, set the password for database connectivity. Note that this password must be the same one you used in place of <CUSTOM-DB-PASSWORD> during the hash generation step above. Paste the copied hash into the hashed-password line as well.
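The inventory lines being referred to are not reproduced in the post; for orientation, the relevant entries in a Tower 3.4 inventory typically look like the sketch below (variable names are my assumption, not quoted from the original), after which the bundled installer is run:

# inventory (illustrative variable names)
pg_password='<CUSTOM-DB-PASSWORD>'
rabbitmq_password='RabbitMQPassword'

# then run the setup script from the ansible-tower-setup-bundle-3.4.1-1.el7 directory
sudo ./setup.sh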
Once the installer completes, you can use the admin credentials (username admin, with the password defined in the inventory file) to log in and access the console. You can request a free Ansible Tower license for an evaluation environment of up to 10 nodes, or purchase a Red Hat subscription for larger environments and some additional logging, management, and support features. If you have any questions about the steps documented here, would like more information on the installation procedure, or have any feedback or requests, please let us know at [email protected].

What is Microservices Architecture?

You may have heard the term microservices when talking about applications, and in some cases even when talking about the underlying infrastructure. If you've worked with any OOP language, you are familiar with the concepts of abstraction and modularization. Extend that idea to an application: independent functions are broken out and abstracted into separate services, with a set of services working together to form the application. Typically, the more fine-grained and decoupled your services are, the more scalability you can achieve. Let's look at some of the reasons why it is important for your critical applications to be implemented with a microservices architecture framework:
Scalability: By splitting the functions of an application into microservices, each one can be scaled independently of the other functions or components. For example, a microservice responsible for managing database connections can scale independently of the web tier if the connection pool size needs to increase (see the sketch after this list).
Decoupling: Separating the functions into individual services provides flexibility for design, implementation, and maintenance of those individual services.
Continuous Delivery & Updates: Functional upgrades can be released without affecting other components. Each function or service can have its own release pipeline, list of enhancements, and priority for feature releases.
Error Micro-segmentation: Error in one service will be isolated within that service. Faults will not propagate to other functions as they are modularized and separated. Also, releasing an update to address the issue is quicker and more efficient in this architecture.
Parallel Development & Domain Expertise: Each service can have domain-specific experts working on it. With monolithic applications, the entire application stack must be updated for feature releases, and development is stymied by complexity and interdependencies.
Reduced Deployment Time: Individual services can be deployed with a focus on function-specific features and environments. Development, testing, and pipeline releases cover smaller modules rather than the entire application stack. Frequent updates can be made to individual services, and those updates can be deployed to production much faster.
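As a concrete illustration of the scalability point above (Kubernetes and the db-connector deployment name are illustrative assumptions, not something the post prescribes):

# Scale only the database-connection microservice; the web tier's replicas are untouched
kubectl scale deployment db-connector --replicas=5
# Confirm that other deployments kept their replica counts
kubectl get deployments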
If you need assistance in determining the feasibility of transforming your applications from a monolithic to a microservices architecture, Keyva can help. Associates at Keyva have worked with many organizations across verticals on application modernization projects. These include refactoring existing applications, adding a wrapper over current applications so they can be consumed easily by DevOps processes, and more. If you'd like us to review your environment and suggest what might work for you, please contact us at [email protected].

Installing Kong: A First Day Guide

If you've tried to install Kong from scratch, you know it is a little more involved than the official website guide suggests. Some prerequisites need to be in place for the service to install and run successfully. Let's take a look at the steps involved in setting up a Kong instance from the ground up. This guide is valid for RHEL 7, PostgreSQL 11, and Kong 0.14.1. We will set up Kong on a VMware virtual machine; if you are using another hypervisor, the hardware configuration steps may differ slightly. First, here are the prerequisites to address before we begin:
RHEL 7 VM with minimum/base packages
A valid Red Hat subscription with appropriate entitlements
Static IP configuration set up
/etc/hosts configured for name resolution
/etc/hostname set up with the fully qualified domain name
SSH Keys copied on your local workstation (optional)
Nameserver set up
firewalld service disabled
Yum enabled
Non-root user set up for sudo access (optional)
VMware tools package installed
Prepare and Install OS Packages

Since we set up RHEL with minimal packages, we need to enable all the required rpms. First, register with the subscription manager using your Red Hat profile credentials:
subscription-manager register
subscription-manager refresh
subscription-manager attach --auto
subscription-manager repos --list
subscription-manager repos --enable rhel-7-server-rh-common-beta-rpms
subscription-manager repos --enable rhel-7-server-rpms
subscription-manager repos --enable rhel-7-server-source-rpms
subscription-manager repos --enable rhel-7-server-rh-common-source-rpms
subscription-manager repos --enable rhel-7-server-rh-common-debug-rpms
subscription-manager repos --enable rhel-7-server-optional-source-rpms
subscription-manager repos --enable rhel-7-server-extras-rpms
Finish setting up other utilities:
yum repolist
yum -y update
yum -y install wget

PostgreSQL 11 Installation

We first have to set up a PostgreSQL 9.5+ or Cassandra database to use with Kong; version compatibility is listed on Kong's website. We decided to go with PostgreSQL 11. Download the latest PostgreSQL 11 build:
curl -O https://download.postgresql.org/pub/repos/yum/11/redhat/rhel-7-x86_64/pgdg-redhat11-11-2.noarch.rpm
Extract the package:
rpm -ivh pgdg-redhat11-11-2.noarch.rpm
Find the relevant RPMs and install:
yum list postgres*
yum install postgresql11-server.x86_64
Initialize the DB:
/usr/pgsql-11/bin/postgresql-11-setup initdb
Start the PostgreSQL service:
systemctl enable postgresql-11.service
systemctl start postgresql-11.service
Verify the install was successful and the service is up, and set the database password:
su - postgres -c "psql"
postgres=# \password postgres
postgres-# \q
Set up the database for Kong:
postgres=# create user kong; create database kong owner kong;
Make a note of the configuration file location; we will need it later:
postgres=# show hba_file;
hba_file
------------------------------------
/var/lib/pgsql/11/data/pg_hba.conf
(1 row)
We will also set (or update) the password for the kong user. It is recommended to use a string-only password with no numbers: with previous versions, numbers in the password have been observed to cause issues. Since this is a lab environment, it is easier to choose a string-only password and avoid troubleshooting any potential issues caused by this.
postgres=# alter user kong with password 'kong';

Kong 0.14.1 Installation

Enable the relevant EPEL repository:
EL_VERSION=`cat /etc/redhat-release | grep -oE '[0-9]+\.[0-9]+'` && \
sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-${EL_VERSION%.*}.noarch.rpm
Fetch the latest Kong package:
wget https://bintray.com/kong/kong-community-edition-rpm/download_file?file_path=rhel%2F7%2Fkong-community-edition-0.14.1.rhel7.noarch.rpm
The saved file gets an unwieldy name, so rename it to something cleaner:
mv download_file\?file_path\=rhel%2F7%2Fkong-community-edition-0.14.1.rhel7.noarch.rpm kong-community-edition-0.14.1.rhel7.noarch.rpm
Extract the package:
rpm -ivh kong-community-edition-0.14.1.rhel7.noarch.rpm
You can now verify the install by running the command kong at the prompt; it will display the various command options. Next, we need to modify the PostgreSQL config file to make sure the database allows connections from Kong. We noted the location of the pg_hba.conf file earlier, in the database install section.
vi /var/lib/pgsql/11/data/pg_hba.conf
Add the IP of the Kong host under the IPv4 section (or keep the default 127.0.0.1 entry as-is if installing on the same host as the database). Change the authentication method from ident to md5. This is important: otherwise you will get ident authentication errors when you try to start the kong service later. The Kong configuration file that ships with the package has a .default extension, so copy the default file to a .conf extension before making changes:
cp /etc/kong/kong.conf.default /etc/kong/kong.conf
By default, all entries in the configuration file are commented out. You can leave them as-is, or uncomment specific sections or lines. In our case, we will do the following:
1) Under the GENERAL section, uncomment the prefix line and update it to the following path:
prefix = /etc/nginx/ssl
2) Under the NGINX section, uncomment the admin_listen line and update it as follows:
admin_listen = <Kong_Server_IP>:8001, 127.0.0.1:8001, 127.0.0.1:8444 ssl
3) Under the DATASTORE section, uncomment the postgresql lines and update them with the connection information for your database:
database = postgres
pg_host = 127.0.0.1
pg_port = 5432
pg_user = kong
pg_password = <Password you set during DB initiation>
pg_database = kong
pg_ssl = off
pg_ssl_verify = off
Save and exit the configuration file. Because we changed the default prefix path, we also need to manually create a directory for that path and set the appropriate permissions on it:
cd /etc
mkdir nginx
cd nginx
mkdir ssl
cd /
chown -R root:root /etc/nginx/ssl
chmod -R 600 /etc/nginx/ssl
Run the Kong migrations:
kong migrations up [-c /etc/kong/kong.conf] --v
Start the Kong service:
kong start [-c /etc/kong/kong.conf] --v
Verify the service is up and running:
curl -i http://<Kong_Server_IP>:8001/
You can also verify the service status by visiting http://<Kong_Server_IP>:8001/ in a browser.
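Beyond the status check, a quick functional smoke test can register a service and route through the admin API and proxy a request through Kong's default proxy port 8000. The service name, upstream URL, and route path here are illustrative assumptions, not part of the original procedure:

# Register an upstream service and a route, then call it through the proxy
curl -i -X POST http://<Kong_Server_IP>:8001/services --data name=example --data url=http://httpbin.org
curl -i -X POST http://<Kong_Server_IP>:8001/services/example/routes --data 'paths[]=/example'
curl -i http://<Kong_Server_IP>:8000/example/get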
Troubleshooting

1) Error: Absolute path to the certificate
/usr/local/share/lua/5.1/kong/cmd/start.lua:71: /usr/local/share/lua/5.1/kong/cmd/start.lua:28: nginx configuration is invalid (exit code 1): nginx: [emerg] SSL_CTX_load_verify_locations("/etc/nginx/ssl/# Absolute path to the certificate") failed (SSL: error:02001002:system library:fopen:No such file or directory:fopen('/etc/nginx/ssl/# Absolute path to the certificate','r') error:2006D080:BIO routines:BIO_new_file:no such file error:0B084002:x509 certificate routines:X509_load_cert_crl_file:system lib) nginx: configuration file /etc/nginx/ssl/nginx.conf test failed
If you get an error like the above, make sure the DEVELOPMENT & MISCELLANEOUS sections in the /etc/kong/kong.conf file are all commented out. If you are not using SSL certificates, comment out all lines that point to a certificate path (e.g. #lua_ssl_trusted_certificate =).
2) Error: Permission Denied
This can be caused by Kong trying to access the default prefix path /usr/local/kong without proper permissions. If the install is run by a non-root user, grant that user write permissions on the folder, or manually create a different folder (see the instructions above) and assign appropriate write permissions to it.
3) Error: During service verification, the curl command works, but browser access fails with Page Not Found
Make sure the admin_listen = <Kong_Server_IP>:8001, 127.0.0.1:8001, 127.0.0.1:8444 ssl line is not commented out in /etc/kong/kong.conf, and that Kong_Server_IP reflects the IPv4 address of the server where Kong is installed.
4) Error: Unauthorized ident access
Make sure that in the PostgreSQL configuration file (/var/lib/pgsql/11/data/pg_hba.conf) the access method for the kong host is set to md5.
5) Error: In postgres.lua Expected value but found T_END at character 1
Check the database configuration parameters in /etc/kong/kong.conf, and verify that the database server name, IP, and credentials are correct. Also try dropping and recreating the database in PostgreSQL (in case the database got corrupted):
postgres=# drop database kong;
postgres=# create database kong owner kong;
Then restart the PostgreSQL service from the console:
systemctl stop postgresql-11
systemctl start postgresql-11
If you have any questions about the steps documented here, would like more information on the installation procedure, or have any feedback or requests, please let us know at [email protected].

Avoid Cloud & Container Vendor Lock-In

We've all heard that containers and cloud technologies help us avoid lock-in. While that is mostly true, you could be unintentionally locking yourself into a particular technology or framework if you are not careful. Let's talk about some important aspects to consider when you are evaluating a cloud or container platform.

1. Compile a list of the features you want a platform to have, based on your requirements
Your needs for a hybrid cloud architecture may differ from other organizations'. The vision of having more flexibility and agility may be the same, but your workloads are different. It is important to know the in-depth architecture of the applications and workloads you want to live in the cloud or container environment.

2. Support
You may have thousands of commercial off-the-shelf applications, and hundreds of custom-built ones. Some application vendors may not support containerizing their applications, and there can be regulatory or other limitations that prevent certain legacy applications from being supported on containers. Depending upon how critical and integral such applications are to your business, you may want to consider newer offerings that make full use of cloud and containerization benefits such as a microservices architecture.

3. Licensing and exit costs
Platform cost is always a factor, no matter the size of the organization. One of the native benefits of containerization is the ability to control costs, assuming your applications are architected accordingly. For example, a cloud-native application built on microservices can scale just the web tier into the public cloud when necessary, rather than spinning up large-capacity VMs on demand for a monolithic application. Another important consideration is how the platforms are priced. A platform priced per worker node may be more feasible for an organization that plans to develop hundreds or thousands of microservices and run them efficiently on worker nodes; a platform priced by the number of containerized applications may be more feasible for monolithic applications. You will need to take into account how your organization plans to scale the containerization of applications, and what the application architectures will look like. Although you will be making a decision based on long-term usage of the platform, it is always a good idea to estimate the exit costs once you decide to move away from the chosen platform. As an exercise, work out what changes would be needed to the application architecture, the deployment architecture, resource skill sets, and more if you were to move away from the chosen platform. That can serve as a yardstick for how locked in you might be. There will always be some level of customization effort and lock-in no matter which solution you choose, but the idea is to minimize the monetary and non-monetary exit costs.

4. Detailed architecture knowledge about your applications
Nobody wants to make major technology adoption decisions based on hearsay. It is critical to know the detailed application architectures, as that will guide you toward choosing the right solution. If the bulk of your application workloads are legacy monolithic applications, the primary tangible benefit of containerizing them will be horizontal scaling. This assumes the application vendors will support the containerized versions, the applications will allow multiple instances running within the same subnet, the applications can sit behind a load balancer, and so on. To leverage the self-healing and other native capabilities offered by containers, cloud-native applications developed in a microservices architecture are the ideal workload. But in the real world there is always going to be a mix of legacy and newer workloads; for highly regulated industries like healthcare, insurance, and banking, legacy applications may form the biggest chunk. If you have the time, it is worth doing a cost-benefit analysis for legacy workloads, comparing the benefits offered by horizontal scalability and the like, before choosing to move forward with containerization.

5. Implement vendor-agnostic tools and processes, refactoring if necessary
Choosing a platform is just one piece of the complex puzzle involved in rolling out new technologies. There are critical pre- and post-containerization tasks that also need to be accounted for. For example: how easy or difficult will it be to refactor an existing application for the chosen platform? To monitor the platform's performance? To integrate it with other toolsets? Will your existing processes or toolsets need changes to work effectively with it? The end goal for most organizations is an easy transition path into cloud and container platforms, and to use these platforms as effective tools for their IT teams and the business in general. This vision is best achieved with a vendor-agnostic strategy, avoiding vendor lock-in wherever possible.

Associates at Keyva have helped multiple organizations assess their application readiness and move into containerized and cloud environments. Keyva also helps with the pre- and post-containerization steps, including refactoring existing applications, adding a wrapper over current applications so they can be consumed easily by DevOps processes, and more. If you'd like us to review your environment and suggest what might work for you, please contact us at [email protected].

3 Steps to Achieving Configuration Management Database (CMDB) Nirvana

You might have heard this before: the configuration management database (CMDB) should be the single source of truth. But what does that mean, and how can you achieve it? With all the different third-party applications in your environment, consolidating all the data into the CMDB may sound like a gargantuan effort. It is a decent amount of work, but the reality is that it is easier than most would anticipate. By taking the three-step approach below, you can come close to CMDB nirvana: a current and accurate CMDB.

Step 1: Develop and agree on a CMDB schema and the necessary mappings
This step requires the IT Service Management (ITSM) teams and the various business units to agree on a CMDB schema and how it will be organized. The ITSM teams create data mappings to map the data captured from various software components in the environment into the specified fields of the CMDB schema. This also includes activities around customizing fields within the CMDB forms and customizing API access.

Step 2: Integrate and automate
Integrate the CMDB with all sources of data per the identified data mappings, either by leveraging existing integrations or by creating new ones. The CMDB can be populated as part of an extract-transform-load process (retrospectively), or as part of the creation of a CI using automation (prospectively). CMDB population is a multi-step process in which data is captured by a discovery tool and automatically updated in the CMDB. Automating CI population also helps create relationships between CIs and Change or Incident tickets, making the review process for Change Advisory Boards much easier.

Step 3: Optimize and reconcile
Once the data is in the CMDB, it is important to make sure it is accurate. Since many different sources of data may compete for the same target field in the CMDB, weights can be assigned to each source to improve accuracy. For example, the asset tag of a device may carry a higher weight when that data comes from a discovery tool, while the CPU information captured by a configuration management system can be trusted more than any other source. Furthermore, the data captured from the various sources should be put in staging datasets; it is up to the administrators of the system to define rule sets and reconciliation rules that automatically filter the required data into the production dataset for consumption.

The above may seem like an oversimplification of the tasks required for a fully functional CMDB, but many organizations have successfully adopted a version of this breakdown. It is highly likely that most of the time will be spent upfront, during the CMDB schema and data mapping exercise. By investing time and effort in an accurate CMDB, organizations can effectively understand the various configurations and their relationships within the environment, and thereby easily track and manage them.

Associates at Keyva have been helping customers set up and optimize their ITSM and CMDB systems for the past two decades. We've also helped organizations develop integrations between CMDBs and third-party application software to accelerate CMDB population and keep it current and accurate. If you'd like us to review your environment and suggest what might work for you, please contact us at [email protected].