Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #15918 from justinsb/fix_typos
Auto commit by PR queue bot
Commit d453976e8a
@@ -55,7 +55,7 @@ const TagNameKubernetesCluster = "KubernetesCluster"
 // We sometimes read to see if something exists; then try to create it if we didn't find it
 // This can fail once in a consistent system if done in parallel
 // In an eventually consistent system, it could fail unboundedly
-// MaxReadThenCreateRetries sets the maxiumum number of attempts we will make
+// MaxReadThenCreateRetries sets the maximum number of attempts we will make
 const MaxReadThenCreateRetries = 30

 // Abstraction over AWS, to allow mocking/other implementations
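For context, the read-then-create pattern that comment describes can be sketched roughly as below. ensureResource and its read/create callbacks are hypothetical stand-ins for the actual aws.go helpers, not code from this commit.

package main

import (
	"errors"
	"fmt"
)

// Mirrors the constant in the hunk above.
const MaxReadThenCreateRetries = 30

// ensureResource illustrates the bounded read-then-create loop: check whether
// the resource exists, try to create it if not, and retry because a concurrent
// creator can race us in an eventually consistent system.
func ensureResource(read func() (string, error), create func() (string, error)) (string, error) {
	for attempt := 0; attempt < MaxReadThenCreateRetries; attempt++ {
		if id, err := read(); err == nil && id != "" {
			return id, nil // already exists
		}
		if id, err := create(); err == nil {
			return id, nil // we created it
		}
		// Creation failed; someone else may have created it concurrently, so re-read.
	}
	return "", errors.New("resource neither found nor created within the retry budget")
}

func main() {
	id, err := ensureResource(
		func() (string, error) { return "", errors.New("not found") },
		func() (string, error) { return "sg-hypothetical", nil },
	)
	fmt.Println(id, err)
}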
@@ -1581,7 +1581,7 @@ func (s *AWSCloud) listSubnetIDsinVPC(vpcId string) ([]string, error) {
 }

 // EnsureTCPLoadBalancer implements TCPLoadBalancer.EnsureTCPLoadBalancer
-// TODO(justinsb) It is weird that these take a region. I suspect it won't work cross-region anwyay.
+// TODO(justinsb) It is weird that these take a region. I suspect it won't work cross-region anyway.
 func (s *AWSCloud) EnsureTCPLoadBalancer(name, region string, publicIP net.IP, ports []*api.ServicePort, hosts []string, affinity api.ServiceAffinity) (*api.LoadBalancerStatus, error) {
 glog.V(2).Infof("EnsureTCPLoadBalancer(%v, %v, %v, %v, %v)", name, region, publicIP, ports, hosts)

@@ -43,7 +43,7 @@ const (
 )

 /*
-ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported)
+ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
 typical use case:
 rc.ConsumeCPU(600)
 // ... check your assumption here
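As a loose illustration of the usage pattern in that doc comment; the interface and fake consumer below are placeholders, not the real e2e ResourceConsumer API.

package main

import "fmt"

// consumerLike captures only the call pattern shown in the doc comment; the
// real ResourceConsumer in the e2e suite has a richer API.
type consumerLike interface {
	ConsumeCPU(millicores int)
}

type fakeConsumer struct{ requested int }

func (f *fakeConsumer) ConsumeCPU(millicores int) { f.requested = millicores }

func main() {
	var rc consumerLike = &fakeConsumer{}
	rc.ConsumeCPU(600) // ask the consumer to burn roughly 600 millicores
	// ... check your assumption here, e.g. that an autoscaler reacted to the load.
	fmt.Println("requested millicores:", rc.(*fakeConsumer).requested)
}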
@@ -400,7 +400,7 @@ func runCmd(command string, args ...string) (string, string, error) {
 var bout, berr bytes.Buffer
 cmd := exec.Command(command, args...)
 // We also output to the OS stdout/stderr to aid in debugging in case cmd
-// hangs and never retruns before the test gets killed.
+// hangs and never returns before the test gets killed.
 cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
 cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
 err := cmd.Run()
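The tee-to-buffers idea in runCmd can be shown standalone; this is a minimal sketch of the same io.MultiWriter pattern, not the test helper itself.

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

func main() {
	var bout, berr bytes.Buffer
	cmd := exec.Command("echo", "hello from the child process")
	// Stream output live to our own stdout/stderr (useful if the child hangs),
	// while also capturing it in buffers for later inspection.
	cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
	cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
	if err := cmd.Run(); err != nil {
		fmt.Fprintf(os.Stderr, "command failed: %v\nstderr: %s\n", err, berr.String())
		return
	}
	fmt.Printf("captured stdout: %q\n", bout.String())
}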
@@ -447,7 +447,7 @@ func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalan
 }

 // migRollingUpdate starts a MIG rolling update, upgrading the nodes to a new
-// instance template named tmpl, and waits up to nt times the nubmer of nodes
+// instance template named tmpl, and waits up to nt times the number of nodes
 // for it to complete.
 func migRollingUpdate(tmpl string, nt time.Duration) error {
 By(fmt.Sprintf("starting the MIG rolling update to %s", tmpl))
@@ -464,7 +464,7 @@ func migRollingUpdate(tmpl string, nt time.Duration) error {
 return nil
 }

-// migTemlate (GCE/GKE-only) returns the name of the MIG template that the
+// migTemplate (GCE/GKE-only) returns the name of the MIG template that the
 // nodes of the cluster use.
 func migTemplate() (string, error) {
 var errLast error
@@ -514,7 +514,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
 // NOTE(mikedanese): If you are changing this gcloud command, update
 // cluster/gce/upgrade.sh to match this EXACTLY.
 // A `rolling-updates start` call outputs what we want to stderr.
-_, output, err := retryCmd("gcloud", append(migUdpateCmdBase(),
+_, output, err := retryCmd("gcloud", append(migUpdateCmdBase(),
 "rolling-updates",
 fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
 fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
@@ -566,7 +566,7 @@ func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
 //
 // TODO(mikedanese): Remove this hack on July 29, 2015 when the migration to
 // `gcloud alpha compute rolling-updates` is complete.
-func migUdpateCmdBase() []string {
+func migUpdateCmdBase() []string {
 b := []string{"preview"}
 a := []string{"rolling-updates", "-h"}
 if err := exec.Command("gcloud", append(b, a...)...).Run(); err != nil {
@@ -586,7 +586,7 @@ func migRollingUpdatePoll(id string, nt time.Duration) error {
 Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
 if wait.Poll(restartPoll, timeout, func() (bool, error) {
 // A `rolling-updates describe` call outputs what we want to stdout.
-output, _, err := retryCmd("gcloud", append(migUdpateCmdBase(),
+output, _, err := retryCmd("gcloud", append(migUpdateCmdBase(),
 "rolling-updates",
 fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
 fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
@@ -41,7 +41,7 @@ import (
 // This test primarily checks 2 things:
 // 1. Daemons restart automatically within some sane time (10m).
 // 2. They don't take abnormal actions when restarted in the steady state.
-// - Controller manager sholdn't overshoot replicas
+// - Controller manager shouldn't overshoot replicas
 // - Kubelet shouldn't restart containers
 // - Scheduler should continue assigning hosts to new pods

@@ -197,7 +197,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 // Use a unique namespace for the resources created in this test.
 ns := f.Namespace.Name
 name := "synthlogger"
-// Form a unique name to taint log lines to be colelcted.
+// Form a unique name to taint log lines to be collected.
 // Replace '-' characters with '_' to prevent the analyzer from breaking apart names.
 taintName := strings.Replace(ns+name, "-", "_", -1)

@@ -240,7 +240,7 @@ func ClusterLevelLoggingWithElasticsearch(f *Framework) {
 }
 }()

-// Wait for the syntehtic logging pods to finish.
+// Wait for the synthetic logging pods to finish.
 By("Waiting for the pods to succeed.")
 for _, pod := range podNames {
 err = waitForPodSuccessInNamespace(f.Client, pod, "synth-logger", ns)
@@ -95,7 +95,7 @@ var _ = Describe("hostPath", func() {
 fmt.Sprintf("--retry_time=%d", retryDuration),
 }
 //Read the content of the file with the second container to
-//verify volumes being shared properly among continers within the pod.
+//verify volumes being shared properly among containers within the pod.
 testContainerOutput("hostPath r/w", c, pod, 1, []string{
 "content of file \"/test-volume/test-file\": mount-tester new file",
 }, namespace.Name,
@@ -217,7 +217,7 @@ func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, comp
 job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
 case "randomlySucceedOrFail":
 // Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
-// Dividing by 16384 gives roughly 50/50 chance of succeess.
+// Dividing by 16384 gives roughly 50/50 chance of success.
 job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
 }
 return job
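To spell out the arithmetic behind that comment: $RANDOM is uniform on 0..32767, so integer division by 16384 maps the lower half to exit code 0 (success) and the upper half to 1 (failure). A standalone sanity check of the 50/50 claim, not part of the test code:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const trials = 1000000
	successes := 0
	for i := 0; i < trials; i++ {
		// Same mapping as `exit $(( $RANDOM / 16384 ))`: 0..16383 -> 0, 16384..32767 -> 1.
		if rand.Intn(32768)/16384 == 0 {
			successes++
		}
	}
	fmt.Printf("observed success rate: %.3f\n", float64(successes)/trials) // ~0.500
}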
@@ -1116,7 +1116,7 @@ func validateReplicationControllerConfiguration(rc api.ReplicationController) {
 }

 // getUDData creates a validator function based on the input string (i.e. kitten.jpg).
-// For example, if you send "kitten.jpg", this function veridies that the image jpg = kitten.jpg
+// For example, if you send "kitten.jpg", this function verifies that the image jpg = kitten.jpg
 // in the container's json field.
 func getUDData(jpgExpected string, ns string) func(*client.Client, string) error {

@@ -147,7 +147,7 @@ func HighLatencyKubeletOperations(c *client.Client, threshold time.Duration, nod
 return badMetrics, nil
 }

-// getContainerInfo contacts kubelet for the container informaton. The "Stats"
+// getContainerInfo contacts kubelet for the container information. The "Stats"
 // in the returned ContainerInfo is subject to the requirements in statsRequest.
 func getContainerInfo(c *client.Client, nodeName string, req *kubelet.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
 reqBody, err := json.Marshal(req)
@@ -214,14 +214,14 @@ func (r *containerResourceUsage) isStrictlyGreaterThan(rhs *containerResourceUsa
 // cpuInterval.
 // The acceptable range of the interval is 2s~120s. Be warned that as the
 // interval (and #containers) increases, the size of kubelet's response
-// could be sigificant. E.g., the 60s interval stats for ~20 containers is
+// could be significant. E.g., the 60s interval stats for ~20 containers is
 // ~1.5MB. Don't hammer the node with frequent, heavy requests.
 //
 // cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
 // stats points to compute the cpu usage over the interval. Assuming cadvisor
 // polls every second, we'd need to get N stats points for N-second interval.
 // Note that this is an approximation and may not be accurate, hence we also
-// write the actual interval used for calcuation (based on the timestampes of
+// write the actual interval used for calculation (based on the timestamps of
 // the stats points in containerResourceUsage.CPUInterval.
 func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterval time.Duration) (map[string]*containerResourceUsage, error) {
 numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
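The arithmetic that comment describes, average cores computed from two cumulative cadvisor-style samples, can be sketched like this; the statsPoint struct and its field names are illustrative, not the cadvisor API.

package main

import (
	"fmt"
	"time"
)

// statsPoint stands in for a cadvisor stats sample: a timestamp plus the
// cumulative CPU time used so far, in nanoseconds.
type statsPoint struct {
	Timestamp          time.Time
	CumulativeCPUNanos uint64
}

// cpuUsageOverInterval returns the average cores used between two samples:
// the delta of the cumulative counters divided by the wall-clock delta. Both
// are in nanoseconds, so the ratio is "CPU-seconds per second", i.e. cores.
func cpuUsageOverInterval(first, last statsPoint) (cores float64, interval time.Duration) {
	interval = last.Timestamp.Sub(first.Timestamp)
	if interval <= 0 {
		return 0, 0
	}
	deltaNanos := float64(last.CumulativeCPUNanos - first.CumulativeCPUNanos)
	return deltaNanos / float64(interval.Nanoseconds()), interval
}

func main() {
	t0 := time.Now()
	first := statsPoint{Timestamp: t0, CumulativeCPUNanos: 10000000000}
	last := statsPoint{Timestamp: t0.Add(60 * time.Second), CumulativeCPUNanos: 40000000000}
	cores, interval := cpuUsageOverInterval(first, last)
	fmt.Printf("average usage: %.2f cores over %v\n", cores, interval) // 0.50 cores over 1m0s
}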
@@ -367,7 +367,7 @@ func newResourceCollector(c *client.Client, nodeName string, containerNames []st
 }
 }

-// Start starts a goroutine to poll the node every pollingInerval.
+// Start starts a goroutine to poll the node every pollingInterval.
 func (r *resourceCollector) Start() {
 r.stopCh = make(chan struct{}, 1)
 // Keep the last observed stats for comparison.
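A stripped-down sketch of that Start/stop-channel polling pattern; the collector type and poll callback below are placeholders, whereas the real resourceCollector polls the kubelet.

package main

import (
	"fmt"
	"time"
)

type collector struct {
	stopCh chan struct{}
	poll   func()
}

// Start launches a goroutine that calls poll on every tick until Stop closes stopCh.
func (c *collector) Start(pollingInterval time.Duration) {
	c.stopCh = make(chan struct{})
	go func() {
		ticker := time.NewTicker(pollingInterval)
		defer ticker.Stop()
		for {
			select {
			case <-c.stopCh:
				return
			case <-ticker.C:
				c.poll()
			}
		}
	}()
}

func (c *collector) Stop() { close(c.stopCh) }

func main() {
	c := &collector{poll: func() { fmt.Println("polled node at", time.Now().Format(time.StampMilli)) }}
	c.Start(200 * time.Millisecond)
	time.Sleep(time.Second)
	c.Stop()
}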
@@ -184,10 +184,10 @@ func (config *KubeProxyTestConfig) hitNodePort(epCount int) {
 By("dialing(http) endpoint container --> node1:nodeHttpPort")
 config.dialFromEndpointContainer("http", node1_IP, nodeHttpPort, tries, epCount)

-// TODO: doesnt work because masquerading is not done
+// TODO: doesn't work because masquerading is not done
 By("TODO: Test disabled. dialing(udp) node --> 127.0.0.1:nodeUdpPort")
 //config.dialFromNode("udp", "127.0.0.1", nodeUdpPort, tries, epCount)
-// TODO: doesnt work because masquerading is not done
+// TODO: doesn't work because masquerading is not done
 By("Test disabled. dialing(http) node --> 127.0.0.1:nodeHttpPort")
 //config.dialFromNode("http", "127.0.0.1", nodeHttpPort, tries, epCount)

@@ -75,7 +75,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 // rather than an explicit name is preferred because the names will typically have
 // a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
 // update e.g. to heapster-monitoring-v2. By using a label query we can check for the
-// situaiton when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
+// situation when a heapster-monitoring-v1 and heapster-monitoring-v2 replication controller
 // is running (which would be an error except during a rolling update).
 for _, rcLabel := range rcLabels {
 rcList, err := c.ReplicationControllers(api.NamespaceSystem).List(labels.Set{"k8s-app": rcLabel}.AsSelector(), fields.Everything())
@@ -326,7 +326,7 @@ func createPD() (string, error) {
 pdName := fmt.Sprintf("%s-%s", testContext.prefix, string(util.NewUUID()))

 zone := testContext.CloudConfig.Zone
-// TODO: make this hit the compute API directly instread of shelling out to gcloud.
+// TODO: make this hit the compute API directly instead of shelling out to gcloud.
 err := exec.Command("gcloud", "compute", "--quiet", "--project="+testContext.CloudConfig.ProjectID, "disks", "create", "--zone="+zone, "--size=10GB", pdName).Run()
 if err != nil {
 return "", err
@@ -33,7 +33,7 @@ import (
 const (
 // How long each node is given during a process that restarts all nodes
 // before the test is considered failed. (Note that the total time to
-// restart all nodes will be this number times the nubmer of nodes.)
+// restart all nodes will be this number times the number of nodes.)
 restartPerNodeTimeout = 5 * time.Minute

 // How often to poll the statues of a restart.
@@ -69,7 +69,7 @@ type LBCTester interface {
 getName() string
 }

-// haproxyControllerTester implementes LBCTester for bare metal haproxy LBs.
+// haproxyControllerTester implements LBCTester for bare metal haproxy LBs.
 type haproxyControllerTester struct {
 client *client.Client
 cfg string
@@ -174,7 +174,7 @@ func (s *ingManager) start(namespace string) (err error) {
 }
 }
 // Create services.
-// Note that it's upto the caller to make sure the service actually matches
+// Note that it's up to the caller to make sure the service actually matches
 // the pods of the rc.
 for _, svcPath := range s.svcCfgPaths {
 svc := svcFromManifest(svcPath)
@@ -560,7 +560,7 @@ func deleteNS(c *client.Client, namespace string, timeout time.Duration) error {
 }
 }

-// a timeout occured
+// a timeout occurred
 if err != nil {
 if missingTimestamp {
 return fmt.Errorf("namespace %s was not deleted within limit: %v, some pods were not marked with a deletion timestamp, pods remaining: %v", namespace, err, remaining)
@@ -888,7 +888,7 @@ func loadClient() (*client.Client, error) {
 // TODO: Allow service names to have the same form as names
 // for pods and replication controllers so we don't
 // need to use such a function and can instead
-// use the UUID utilty function.
+// use the UUID utility function.
 func randomSuffix() string {
 r := rand.New(rand.NewSource(time.Now().UnixNano()))
 return strconv.Itoa(r.Int() % 10000)